diff -Nru cargo-0.17.0/appveyor.yml cargo-0.19.0/appveyor.yml --- cargo-0.17.0/appveyor.yml 2017-03-03 19:21:26.000000000 +0000 +++ cargo-0.19.0/appveyor.yml 2017-05-16 03:23:10.000000000 +0000 @@ -30,7 +30,7 @@ # FIXME(#3394) use master rustup - curl -sSfO https://static.rust-lang.org/rustup/archive/0.6.5/x86_64-pc-windows-msvc/rustup-init.exe - - rustup-init.exe -y --default-host x86_64-pc-windows-msvc --default-toolchain nightly + - rustup-init.exe -y --default-host x86_64-pc-windows-msvc --default-toolchain nightly-2017-03-03 - set PATH=%PATH%;C:\Users\appveyor\.cargo\bin - if NOT "%TARGET%" == "x86_64-pc-windows-msvc" rustup target add %TARGET% - if defined OTHER_TARGET rustup target add %OTHER_TARGET% diff -Nru cargo-0.17.0/Cargo.lock cargo-0.19.0/Cargo.lock --- cargo-0.17.0/Cargo.lock 2017-03-03 19:21:26.000000000 +0000 +++ cargo-0.19.0/Cargo.lock 2017-05-16 03:23:10.000000000 +0000 @@ -1,39 +1,43 @@ [root] name = "cargo" -version = "0.17.0" +version = "0.19.0" dependencies = [ "advapi32-sys 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", "bufstream 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "cargotest 0.1.0", - "crates-io 0.6.0", + "chrono 0.2.25 (registry+https://github.com/rust-lang/crates.io-index)", + "crates-io 0.8.0", "crossbeam 0.2.10 (registry+https://github.com/rust-lang/crates.io-index)", - "curl 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)", - "docopt 0.6.86 (registry+https://github.com/rust-lang/crates.io-index)", - "env_logger 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)", + "curl 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)", + "docopt 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", + "env_logger 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", "filetime 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", - "flate2 0.2.14 (registry+https://github.com/rust-lang/crates.io-index)", - "fs2 0.3.0 
(registry+https://github.com/rust-lang/crates.io-index)", - "git2 0.6.3 (registry+https://github.com/rust-lang/crates.io-index)", + "flate2 0.2.17 (registry+https://github.com/rust-lang/crates.io-index)", + "fs2 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)", + "git2 0.6.4 (registry+https://github.com/rust-lang/crates.io-index)", "git2-curl 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", "glob 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)", "hamcrest 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", "kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.18 (registry+https://github.com/rust-lang/crates.io-index)", - "libgit2-sys 0.6.6 (registry+https://github.com/rust-lang/crates.io-index)", - "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", - "miow 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", - "num_cpus 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", - "openssl 0.9.6 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.21 (registry+https://github.com/rust-lang/crates.io-index)", + "libgit2-sys 0.6.7 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)", + "miow 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", + "num_cpus 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)", + "openssl 0.9.10 (registry+https://github.com/rust-lang/crates.io-index)", "psapi-sys 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", - "regex 0.1.80 (registry+https://github.com/rust-lang/crates.io-index)", - "rustc-serialize 0.3.21 (registry+https://github.com/rust-lang/crates.io-index)", - "semver 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)", + "rustc-serialize 0.3.23 (registry+https://github.com/rust-lang/crates.io-index)", + "semver 0.6.0 
(registry+https://github.com/rust-lang/crates.io-index)", + "serde 0.9.12 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_derive 0.9.12 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_ignored 0.0.2 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_json 0.9.9 (registry+https://github.com/rust-lang/crates.io-index)", "shell-escape 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", - "tar 0.4.9 (registry+https://github.com/rust-lang/crates.io-index)", + "tar 0.4.10 (registry+https://github.com/rust-lang/crates.io-index)", "tempdir 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)", - "term 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)", - "toml 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", - "url 1.2.3 (registry+https://github.com/rust-lang/crates.io-index)", + "term 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)", + "toml 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)", + "url 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -55,6 +59,14 @@ ] [[package]] +name = "aho-corasick" +version = "0.6.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "memchr 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] name = "bitflags" version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" @@ -69,19 +81,21 @@ version = "0.1.0" dependencies = [ "bufstream 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", - "cargo 0.17.0", + "cargo 0.19.0", "filetime 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", - "flate2 0.2.14 (registry+https://github.com/rust-lang/crates.io-index)", - "git2 0.6.3 (registry+https://github.com/rust-lang/crates.io-index)", + "flate2 0.2.17 (registry+https://github.com/rust-lang/crates.io-index)", + "git2 0.6.4 
(registry+https://github.com/rust-lang/crates.io-index)", "hamcrest 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", "kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.18 (registry+https://github.com/rust-lang/crates.io-index)", - "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", - "rustc-serialize 0.3.21 (registry+https://github.com/rust-lang/crates.io-index)", - "tar 0.4.9 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.21 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)", + "rustc-serialize 0.3.23 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 0.9.12 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_json 0.9.9 (registry+https://github.com/rust-lang/crates.io-index)", + "tar 0.4.10 (registry+https://github.com/rust-lang/crates.io-index)", "tempdir 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)", - "term 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)", - "url 1.2.3 (registry+https://github.com/rust-lang/crates.io-index)", + "term 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)", + "url 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -91,20 +105,31 @@ source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] +name = "chrono" +version = "0.2.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "num 0.1.37 (registry+https://github.com/rust-lang/crates.io-index)", + "time 0.1.36 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] name = "cmake" -version = "0.1.19" +version = "0.1.22" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "gcc 0.3.39 (registry+https://github.com/rust-lang/crates.io-index)", + "gcc 0.3.45 
(registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "crates-io" -version = "0.6.0" +version = "0.8.0" dependencies = [ - "curl 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)", - "rustc-serialize 0.3.21 (registry+https://github.com/rust-lang/crates.io-index)", - "url 1.2.3 (registry+https://github.com/rust-lang/crates.io-index)", + "curl 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 0.9.12 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_derive 0.9.12 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_json 0.9.9 (registry+https://github.com/rust-lang/crates.io-index)", + "url 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -114,84 +139,89 @@ [[package]] name = "curl" -version = "0.4.1" +version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "curl-sys 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.18 (registry+https://github.com/rust-lang/crates.io-index)", - "openssl-probe 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", - "openssl-sys 0.9.6 (registry+https://github.com/rust-lang/crates.io-index)", + "curl-sys 0.3.10 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.21 (registry+https://github.com/rust-lang/crates.io-index)", + "openssl-probe 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", + "openssl-sys 0.9.10 (registry+https://github.com/rust-lang/crates.io-index)", "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "curl-sys" -version = "0.3.6" +version = "0.3.10" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "gcc 0.3.39 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.18 (registry+https://github.com/rust-lang/crates.io-index)", + "gcc 0.3.45 (registry+https://github.com/rust-lang/crates.io-index)", + 
"libc 0.2.21 (registry+https://github.com/rust-lang/crates.io-index)", "libz-sys 1.0.13 (registry+https://github.com/rust-lang/crates.io-index)", - "openssl-sys 0.9.6 (registry+https://github.com/rust-lang/crates.io-index)", - "pkg-config 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", + "openssl-sys 0.9.10 (registry+https://github.com/rust-lang/crates.io-index)", + "pkg-config 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)", "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "docopt" -version = "0.6.86" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "lazy_static 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", - "regex 0.1.80 (registry+https://github.com/rust-lang/crates.io-index)", - "rustc-serialize 0.3.21 (registry+https://github.com/rust-lang/crates.io-index)", - "strsim 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)", + "lazy_static 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)", + "regex 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", + "rustc-serialize 0.3.23 (registry+https://github.com/rust-lang/crates.io-index)", + "strsim 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] -name = "env_logger" -version = "0.3.5" +name = "dtoa" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", - "regex 0.1.80 (registry+https://github.com/rust-lang/crates.io-index)", -] [[package]] -name = "error-chain" -version = "0.7.2" +name = "env_logger" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "log 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)", + "regex 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", +] [[package]] name = "filetime" version = "0.1.10" 
source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "libc 0.2.18 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.21 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "flate2" -version = "0.2.14" +version = "0.2.17" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "libc 0.2.18 (registry+https://github.com/rust-lang/crates.io-index)", - "miniz-sys 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.21 (registry+https://github.com/rust-lang/crates.io-index)", + "miniz-sys 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] +name = "foreign-types" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] name = "fs2" -version = "0.3.0" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.18 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.21 (registry+https://github.com/rust-lang/crates.io-index)", "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "gcc" -version = "0.3.39" +version = "0.3.45" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] @@ -205,15 +235,15 @@ [[package]] name = "git2" -version = "0.6.3" +version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "bitflags 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.18 (registry+https://github.com/rust-lang/crates.io-index)", - "libgit2-sys 0.6.6 (registry+https://github.com/rust-lang/crates.io-index)", - "openssl-probe 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", - "openssl-sys 0.9.6 (registry+https://github.com/rust-lang/crates.io-index)", - "url 1.2.3 
(registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.21 (registry+https://github.com/rust-lang/crates.io-index)", + "libgit2-sys 0.6.7 (registry+https://github.com/rust-lang/crates.io-index)", + "openssl-probe 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", + "openssl-sys 0.9.10 (registry+https://github.com/rust-lang/crates.io-index)", + "url 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -221,10 +251,10 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "curl 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)", - "git2 0.6.3 (registry+https://github.com/rust-lang/crates.io-index)", - "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", - "url 1.2.3 (registry+https://github.com/rust-lang/crates.io-index)", + "curl 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)", + "git2 0.6.4 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)", + "url 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -237,7 +267,7 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "num 0.1.36 (registry+https://github.com/rust-lang/crates.io-index)", + "num 0.1.37 (registry+https://github.com/rust-lang/crates.io-index)", "regex 0.1.80 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -247,11 +277,16 @@ source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "matches 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", - "unicode-bidi 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)", - "unicode-normalization 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", + "unicode-bidi 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)", + "unicode-normalization 0.1.4 
(registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] +name = "itoa" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] name = "kernel32-sys" version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" @@ -262,27 +297,27 @@ [[package]] name = "lazy_static" -version = "0.2.2" +version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "libc" -version = "0.2.18" +version = "0.2.21" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "libgit2-sys" -version = "0.6.6" +version = "0.6.7" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "cmake 0.1.19 (registry+https://github.com/rust-lang/crates.io-index)", - "curl-sys 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", - "gcc 0.3.39 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.18 (registry+https://github.com/rust-lang/crates.io-index)", + "cmake 0.1.22 (registry+https://github.com/rust-lang/crates.io-index)", + "curl-sys 0.3.10 (registry+https://github.com/rust-lang/crates.io-index)", + "gcc 0.3.45 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.21 (registry+https://github.com/rust-lang/crates.io-index)", "libssh2-sys 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)", "libz-sys 1.0.13 (registry+https://github.com/rust-lang/crates.io-index)", - "openssl-sys 0.9.6 (registry+https://github.com/rust-lang/crates.io-index)", - "pkg-config 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", + "openssl-sys 0.9.10 (registry+https://github.com/rust-lang/crates.io-index)", + "pkg-config 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -290,11 +325,11 @@ version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "cmake 0.1.19 
(registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.18 (registry+https://github.com/rust-lang/crates.io-index)", + "cmake 0.1.22 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.21 (registry+https://github.com/rust-lang/crates.io-index)", "libz-sys 1.0.13 (registry+https://github.com/rust-lang/crates.io-index)", - "openssl-sys 0.9.6 (registry+https://github.com/rust-lang/crates.io-index)", - "pkg-config 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", + "openssl-sys 0.9.10 (registry+https://github.com/rust-lang/crates.io-index)", + "pkg-config 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -302,14 +337,14 @@ version = "1.0.13" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "gcc 0.3.39 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.18 (registry+https://github.com/rust-lang/crates.io-index)", - "pkg-config 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", + "gcc 0.3.45 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.21 (registry+https://github.com/rust-lang/crates.io-index)", + "pkg-config 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "log" -version = "0.3.6" +version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] @@ -322,155 +357,155 @@ version = "0.1.11" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "libc 0.2.18 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.21 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] -name = "metadeps" -version = "1.1.1" +name = "memchr" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "error-chain 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)", - "pkg-config 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", - "toml 
0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.21 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "miniz-sys" -version = "0.1.7" +version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "gcc 0.3.39 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.18 (registry+https://github.com/rust-lang/crates.io-index)", + "gcc 0.3.45 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.21 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "miow" -version = "0.1.3" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", - "net2 0.2.26 (registry+https://github.com/rust-lang/crates.io-index)", + "net2 0.2.27 (registry+https://github.com/rust-lang/crates.io-index)", "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", "ws2_32-sys 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "net2" -version = "0.2.26" +version = "0.2.27" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "cfg-if 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.18 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.21 (registry+https://github.com/rust-lang/crates.io-index)", "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", "ws2_32-sys 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "num" -version = "0.1.36" +version = "0.1.37" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "num-bigint 0.1.35 (registry+https://github.com/rust-lang/crates.io-index)", - "num-complex 0.1.35 
(registry+https://github.com/rust-lang/crates.io-index)", - "num-integer 0.1.32 (registry+https://github.com/rust-lang/crates.io-index)", - "num-iter 0.1.32 (registry+https://github.com/rust-lang/crates.io-index)", - "num-rational 0.1.35 (registry+https://github.com/rust-lang/crates.io-index)", - "num-traits 0.1.36 (registry+https://github.com/rust-lang/crates.io-index)", + "num-bigint 0.1.37 (registry+https://github.com/rust-lang/crates.io-index)", + "num-complex 0.1.36 (registry+https://github.com/rust-lang/crates.io-index)", + "num-integer 0.1.33 (registry+https://github.com/rust-lang/crates.io-index)", + "num-iter 0.1.33 (registry+https://github.com/rust-lang/crates.io-index)", + "num-rational 0.1.36 (registry+https://github.com/rust-lang/crates.io-index)", + "num-traits 0.1.37 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "num-bigint" -version = "0.1.35" +version = "0.1.37" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "num-integer 0.1.32 (registry+https://github.com/rust-lang/crates.io-index)", - "num-traits 0.1.36 (registry+https://github.com/rust-lang/crates.io-index)", - "rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)", - "rustc-serialize 0.3.21 (registry+https://github.com/rust-lang/crates.io-index)", + "num-integer 0.1.33 (registry+https://github.com/rust-lang/crates.io-index)", + "num-traits 0.1.37 (registry+https://github.com/rust-lang/crates.io-index)", + "rand 0.3.15 (registry+https://github.com/rust-lang/crates.io-index)", + "rustc-serialize 0.3.23 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "num-complex" -version = "0.1.35" +version = "0.1.36" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "num-traits 0.1.36 (registry+https://github.com/rust-lang/crates.io-index)", - "rustc-serialize 0.3.21 (registry+https://github.com/rust-lang/crates.io-index)", + "num-traits 0.1.37 
(registry+https://github.com/rust-lang/crates.io-index)", + "rustc-serialize 0.3.23 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "num-integer" -version = "0.1.32" +version = "0.1.33" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "num-traits 0.1.36 (registry+https://github.com/rust-lang/crates.io-index)", + "num-traits 0.1.37 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "num-iter" -version = "0.1.32" +version = "0.1.33" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "num-integer 0.1.32 (registry+https://github.com/rust-lang/crates.io-index)", - "num-traits 0.1.36 (registry+https://github.com/rust-lang/crates.io-index)", + "num-integer 0.1.33 (registry+https://github.com/rust-lang/crates.io-index)", + "num-traits 0.1.37 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "num-rational" -version = "0.1.35" +version = "0.1.36" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "num-bigint 0.1.35 (registry+https://github.com/rust-lang/crates.io-index)", - "num-integer 0.1.32 (registry+https://github.com/rust-lang/crates.io-index)", - "num-traits 0.1.36 (registry+https://github.com/rust-lang/crates.io-index)", - "rustc-serialize 0.3.21 (registry+https://github.com/rust-lang/crates.io-index)", + "num-bigint 0.1.37 (registry+https://github.com/rust-lang/crates.io-index)", + "num-integer 0.1.33 (registry+https://github.com/rust-lang/crates.io-index)", + "num-traits 0.1.37 (registry+https://github.com/rust-lang/crates.io-index)", + "rustc-serialize 0.3.23 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "num-traits" -version = "0.1.36" +version = "0.1.37" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "num_cpus" -version = "1.1.0" +version = "1.3.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "libc 0.2.18 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.21 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "openssl" -version = "0.9.6" +version = "0.9.10" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "bitflags 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", - "lazy_static 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.18 (registry+https://github.com/rust-lang/crates.io-index)", - "openssl-sys 0.9.6 (registry+https://github.com/rust-lang/crates.io-index)", + "foreign-types 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", + "lazy_static 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.21 (registry+https://github.com/rust-lang/crates.io-index)", + "openssl-sys 0.9.10 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "openssl-probe" -version = "0.1.0" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "openssl-sys" -version = "0.9.6" +version = "0.9.10" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ + "gcc 0.3.45 (registry+https://github.com/rust-lang/crates.io-index)", "gdi32-sys 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.18 (registry+https://github.com/rust-lang/crates.io-index)", - "metadeps 1.1.1 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.21 (registry+https://github.com/rust-lang/crates.io-index)", + "pkg-config 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)", "user32-sys 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "pkg-config" -version = "0.3.8" +version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] @@ -483,14 +518,24 @@ ] [[package]] +name = 
"quote" +version = "0.3.15" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] name = "rand" -version = "0.3.14" +version = "0.3.15" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "libc 0.2.18 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.21 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] +name = "redox_syscall" +version = "0.1.17" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] name = "regex" version = "0.1.80" source = "registry+https://github.com/rust-lang/crates.io-index" @@ -503,30 +548,85 @@ ] [[package]] +name = "regex" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "aho-corasick 0.6.3 (registry+https://github.com/rust-lang/crates.io-index)", + "memchr 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)", + "regex-syntax 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "thread_local 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", + "utf8-ranges 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] name = "regex-syntax" version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] +name = "regex-syntax" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] name = "rustc-serialize" -version = "0.3.21" +version = "0.3.23" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "semver" -version = "0.5.1" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "semver-parser 0.6.1 (registry+https://github.com/rust-lang/crates.io-index)", + "semver-parser 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "semver-parser" -version = "0.6.1" +version = "0.7.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "serde" +version = "0.9.12" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "serde_codegen_internals" +version = "0.14.2" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "lazy_static 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", - "regex 0.1.80 (registry+https://github.com/rust-lang/crates.io-index)", + "syn 0.11.9 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "serde_derive" +version = "0.9.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "quote 0.3.15 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_codegen_internals 0.14.2 (registry+https://github.com/rust-lang/crates.io-index)", + "syn 0.11.9 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "serde_ignored" +version = "0.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "serde 0.9.12 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "serde_json" +version = "0.9.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "dtoa 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)", + "itoa 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", + "num-traits 0.1.37 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 0.9.12 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -536,16 +636,34 @@ [[package]] name = "strsim" -version = "0.5.1" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] +name = "syn" +version = "0.11.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "quote 0.3.15 (registry+https://github.com/rust-lang/crates.io-index)", + "synom 0.11.3 
(registry+https://github.com/rust-lang/crates.io-index)", + "unicode-xid 0.0.4 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "synom" +version = "0.11.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "unicode-xid 0.0.4 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] name = "tar" -version = "0.4.9" +version = "0.4.10" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "filetime 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.18 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.21 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -553,12 +671,12 @@ version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)", + "rand 0.3.15 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "term" -version = "0.4.4" +version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", @@ -571,7 +689,16 @@ source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.18 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.21 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "thread-id" +version = "3.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.21 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -583,16 +710,36 @@ ] [[package]] +name = "thread_local" +version = "0.3.3" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "thread-id 3.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "unreachable 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "time" +version = "0.1.36" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.21 (registry+https://github.com/rust-lang/crates.io-index)", + "redox_syscall 0.1.17 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] name = "toml" -version = "0.2.1" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "rustc-serialize 0.3.21 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 0.9.12 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "unicode-bidi" -version = "0.2.3" +version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "matches 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", @@ -600,12 +747,25 @@ [[package]] name = "unicode-normalization" -version = "0.1.2" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] +name = "unicode-xid" +version = "0.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "unreachable" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "void 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] name = "url" -version = "1.2.3" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "idna 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", @@ -627,6 +787,16 @@ source = 
"registry+https://github.com/rust-lang/crates.io-index" [[package]] +name = "utf8-ranges" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "void" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] name = "winapi" version = "0.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" @@ -648,71 +818,93 @@ [metadata] "checksum advapi32-sys 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "e06588080cb19d0acb6739808aafa5f26bfb2ca015b2b6370028b44cf7cb8a9a" "checksum aho-corasick 0.5.3 (registry+https://github.com/rust-lang/crates.io-index)" = "ca972c2ea5f742bfce5687b9aef75506a764f61d37f8f649047846a9686ddb66" +"checksum aho-corasick 0.6.3 (registry+https://github.com/rust-lang/crates.io-index)" = "500909c4f87a9e52355b26626d890833e9e1d53ac566db76c36faa984b889699" "checksum bitflags 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "aad18937a628ec6abcd26d1489012cc0e18c21798210f491af69ded9b881106d" "checksum bufstream 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "7b48dbe2ff0e98fa2f03377d204a9637d3c9816cd431bfe05a8abbd0ea11d074" "checksum cfg-if 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "de1e760d7b6535af4241fca8bd8adf68e2e7edacc6b29f5d399050c5e48cf88c" -"checksum cmake 0.1.19 (registry+https://github.com/rust-lang/crates.io-index)" = "8ebde6558caa6cf9bffe5750c66c517e7f9d470d59fcd48b0acbc0a02d62a82a" +"checksum chrono 0.2.25 (registry+https://github.com/rust-lang/crates.io-index)" = "9213f7cd7c27e95c2b57c49f0e69b1ea65b27138da84a170133fd21b07659c00" +"checksum cmake 0.1.22 (registry+https://github.com/rust-lang/crates.io-index)" = "d18d68987ed4c516dcc3e7913659bfa4076f5182eea4a7e0038bb060953e76ac" "checksum crossbeam 0.2.10 (registry+https://github.com/rust-lang/crates.io-index)" = "0c5ea215664ca264da8a9d9c3be80d2eaf30923c259d03e870388eb927508f97" -"checksum curl 0.4.1 
(registry+https://github.com/rust-lang/crates.io-index)" = "8fd5a1fdcebdb1a59578c5583e66ffed2d13850eac4f51ff730edf6dd6111eac" -"checksum curl-sys 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)" = "218a149208e1f4e5f7e20f1d0ed1e9431a086a6b4333ff95dba82237be9c283a" -"checksum docopt 0.6.86 (registry+https://github.com/rust-lang/crates.io-index)" = "4a7ef30445607f6fc8720f0a0a2c7442284b629cf0d049286860fae23e71c4d9" -"checksum env_logger 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)" = "15abd780e45b3ea4f76b4e9a26ff4843258dd8a3eed2775a0e7368c2e7936c2f" -"checksum error-chain 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)" = "318cb3c71ee4cdea69fdc9e15c173b245ed6063e1709029e8fd32525a881120f" +"checksum curl 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)" = "c90e1240ef340dd4027ade439e5c7c2064dd9dc652682117bd50d1486a3add7b" +"checksum curl-sys 0.3.10 (registry+https://github.com/rust-lang/crates.io-index)" = "c0d909dc402ae80b6f7b0118c039203436061b9d9a3ca5d2c2546d93e0a61aaa" +"checksum docopt 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ab32ea6e284d87987066f21a9e809a73c14720571ef34516f0890b3d355ccfd8" +"checksum dtoa 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)" = "80c8b71fd71146990a9742fc06dcbbde19161a267e0ad4e572c35162f4578c90" +"checksum env_logger 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)" = "e3856f1697098606fc6cb97a93de88ca3f3bc35bb878c725920e6e82ecf05e83" "checksum filetime 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)" = "5363ab8e4139b8568a6237db5248646e5a8a2f89bd5ccb02092182b11fd3e922" -"checksum flate2 0.2.14 (registry+https://github.com/rust-lang/crates.io-index)" = "3eeb481e957304178d2e782f2da1257f1434dfecbae883bafb61ada2a9fea3bb" -"checksum fs2 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "640001e1bd865c7c32806292822445af576a6866175b5225aa2087ca5e3de551" -"checksum gcc 0.3.39 
(registry+https://github.com/rust-lang/crates.io-index)" = "771e4a97ff6f237cf0f7d5f5102f6e28bb9743814b6198d684da5c58b76c11e0" +"checksum flate2 0.2.17 (registry+https://github.com/rust-lang/crates.io-index)" = "d4e4d0c15ef829cbc1b7cda651746be19cceeb238be7b1049227b14891df9e25" +"checksum foreign-types 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "3e4056b9bd47f8ac5ba12be771f77a0dae796d1bbaaf5fd0b9c2d38b69b8a29d" +"checksum fs2 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)" = "34edaee07555859dc13ca387e6ae05686bb4d0364c95d649b6dab959511f4baf" +"checksum gcc 0.3.45 (registry+https://github.com/rust-lang/crates.io-index)" = "40899336fb50db0c78710f53e87afc54d8c7266fb76262fecc78ca1a7f09deae" "checksum gdi32-sys 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "0912515a8ff24ba900422ecda800b52f4016a56251922d397c576bf92c690518" -"checksum git2 0.6.3 (registry+https://github.com/rust-lang/crates.io-index)" = "0534ca86640c6a3a0687cc6bee9ec4032509a0d112d97e8241fa6b7e075f6119" +"checksum git2 0.6.4 (registry+https://github.com/rust-lang/crates.io-index)" = "046ae03385257040b2a35e56d9669d950dd911ba2bf48202fbef73ee6aab27b2" "checksum git2-curl 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "68676bc784bf0bef83278898929bf64a251e87c0340723d0b93fa096c9c5bf8e" "checksum glob 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)" = "8be18de09a56b60ed0edf84bc9df007e30040691af7acd1c41874faac5895bfb" "checksum hamcrest 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "bf088f042a467089e9baa4972f57f9247e42a0cc549ba264c7a04fbb8ecb89d4" "checksum idna 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "1053236e00ce4f668aeca4a769a09b3bf5a682d802abd6f3cb39374f6b162c11" +"checksum itoa 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "eb2f404fbc66fd9aac13e998248505e7ecb2ad8e44ab6388684c5fb11c6c251c" "checksum kernel32-sys 0.2.2 
(registry+https://github.com/rust-lang/crates.io-index)" = "7507624b29483431c0ba2d82aece8ca6cdba9382bff4ddd0f7490560c056098d" -"checksum lazy_static 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "6abe0ee2e758cd6bc8a2cd56726359007748fbf4128da998b65d0b70f881e19b" -"checksum libc 0.2.18 (registry+https://github.com/rust-lang/crates.io-index)" = "a51822fc847e7a8101514d1d44e354ba2ffa7d4c194dcab48870740e327cac70" -"checksum libgit2-sys 0.6.6 (registry+https://github.com/rust-lang/crates.io-index)" = "c7a4e33e9f8b8883c1a5898e72cdc63c00c4f2265283651533b00373094e901c" +"checksum lazy_static 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)" = "4732c563b9a21a406565c4747daa7b46742f082911ae4753f390dc9ec7ee1a97" +"checksum libc 0.2.21 (registry+https://github.com/rust-lang/crates.io-index)" = "88ee81885f9f04bff991e306fea7c1c60a5f0f9e409e99f6b40e3311a3363135" +"checksum libgit2-sys 0.6.7 (registry+https://github.com/rust-lang/crates.io-index)" = "d951fd5eccae07c74e8c2c1075b05ea1e43be7f8952245af8c2840d1480b1d95" "checksum libssh2-sys 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)" = "91e135645c2e198a39552c8c7686bb5b83b1b99f64831c040a6c2798a1195934" "checksum libz-sys 1.0.13 (registry+https://github.com/rust-lang/crates.io-index)" = "e5ee912a45d686d393d5ac87fac15ba0ba18daae14e8e7543c63ebf7fb7e970c" -"checksum log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)" = "ab83497bf8bf4ed2a74259c1c802351fcd67a65baa86394b6ba73c36f4838054" +"checksum log 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)" = "5141eca02775a762cc6cd564d8d2c50f67c0ea3a372cbf1c51592b3e029e10ad" "checksum matches 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)" = "efd7622e3022e1a6eaa602c4cea8912254e5582c9c692e9167714182244801b1" "checksum memchr 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)" = "d8b629fb514376c675b98c1421e80b151d3817ac42d7c667717d282761418d20" -"checksum metadeps 1.1.1 
(registry+https://github.com/rust-lang/crates.io-index)" = "829fffe7ea1d747e23f64be972991bc516b2f1ac2ae4a3b33d8bea150c410151" -"checksum miniz-sys 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)" = "9d1f4d337a01c32e1f2122510fed46393d53ca35a7f429cb0450abaedfa3ed54" -"checksum miow 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "d5bfc6782530ac8ace97af10a540054a37126b63b0702ddaaa243b73b5745b9a" -"checksum net2 0.2.26 (registry+https://github.com/rust-lang/crates.io-index)" = "5edf9cb6be97212423aed9413dd4729d62b370b5e1c571750e882cebbbc1e3e2" -"checksum num 0.1.36 (registry+https://github.com/rust-lang/crates.io-index)" = "bde7c03b09e7c6a301ee81f6ddf66d7a28ec305699e3d3b056d2fc56470e3120" -"checksum num-bigint 0.1.35 (registry+https://github.com/rust-lang/crates.io-index)" = "88b14378471f7c2adc5262f05b4701ef53e8da376453a8d8fee48e51db745e49" -"checksum num-complex 0.1.35 (registry+https://github.com/rust-lang/crates.io-index)" = "f0c78e054dd19c3fd03419ade63fa661e9c49bb890ce3beb4eee5b7baf93f92f" -"checksum num-integer 0.1.32 (registry+https://github.com/rust-lang/crates.io-index)" = "fb24d9bfb3f222010df27995441ded1e954f8f69cd35021f6bef02ca9552fb92" -"checksum num-iter 0.1.32 (registry+https://github.com/rust-lang/crates.io-index)" = "287a1c9969a847055e1122ec0ea7a5c5d6f72aad97934e131c83d5c08ab4e45c" -"checksum num-rational 0.1.35 (registry+https://github.com/rust-lang/crates.io-index)" = "54ff603b8334a72fbb27fe66948aac0abaaa40231b3cecd189e76162f6f38aaf" -"checksum num-traits 0.1.36 (registry+https://github.com/rust-lang/crates.io-index)" = "a16a42856a256b39c6d3484f097f6713e14feacd9bfb02290917904fae46c81c" -"checksum num_cpus 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "8890e6084723d57d0df8d2720b0d60c6ee67d6c93e7169630e4371e88765dcad" -"checksum openssl 0.9.6 (registry+https://github.com/rust-lang/crates.io-index)" = "0c00da69323449142e00a5410f0e022b39e8bbb7dc569cee8fc6af279279483c" -"checksum openssl-probe 0.1.0 
(registry+https://github.com/rust-lang/crates.io-index)" = "756d49c8424483a3df3b5d735112b4da22109ced9a8294f1f5cdf80fb3810919" -"checksum openssl-sys 0.9.6 (registry+https://github.com/rust-lang/crates.io-index)" = "b1482f9a06f56c906007e17ea14d73d102210b5d27bc948bf5e175f493f3f7c3" -"checksum pkg-config 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)" = "8cee804ecc7eaf201a4a207241472cc870e825206f6c031e3ee2a72fa425f2fa" +"checksum memchr 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "1dbccc0e46f1ea47b9f17e6d67c5a96bd27030519c519c9c91327e31275a47b4" +"checksum miniz-sys 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)" = "28eaee17666671fa872e567547e8428e83308ebe5808cdf6a0e28397dbe2c726" +"checksum miow 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "8c1f2f3b1cf331de6896aabf6e9d55dca90356cc9960cca7eaaf408a355ae919" +"checksum net2 0.2.27 (registry+https://github.com/rust-lang/crates.io-index)" = "18b9642ad6222faf5ce46f6966f59b71b9775ad5758c9e09fcf0a6c8061972b4" +"checksum num 0.1.37 (registry+https://github.com/rust-lang/crates.io-index)" = "98b15ba84e910ea7a1973bccd3df7b31ae282bf9d8bd2897779950c9b8303d40" +"checksum num-bigint 0.1.37 (registry+https://github.com/rust-lang/crates.io-index)" = "ba6d838b16e56da1b6c383d065ff1ec3c7d7797f65a3e8f6ba7092fd87820bac" +"checksum num-complex 0.1.36 (registry+https://github.com/rust-lang/crates.io-index)" = "3534898d8a1f6b16c12f9fc2f4eaabc7ecdcc55f267213caa8988fdc7d60ff94" +"checksum num-integer 0.1.33 (registry+https://github.com/rust-lang/crates.io-index)" = "21e4df1098d1d797d27ef0c69c178c3fab64941559b290fcae198e0825c9c8b5" +"checksum num-iter 0.1.33 (registry+https://github.com/rust-lang/crates.io-index)" = "f7d1891bd7b936f12349b7d1403761c8a0b85a18b148e9da4429d5d102c1a41e" +"checksum num-rational 0.1.36 (registry+https://github.com/rust-lang/crates.io-index)" = "c2dc5ea04020a8f18318ae485c751f8cfa1c0e69dcf465c29ddaaa64a313cc44" +"checksum num-traits 0.1.37 
(registry+https://github.com/rust-lang/crates.io-index)" = "e1cbfa3781f3fe73dc05321bed52a06d2d491eaa764c52335cf4399f046ece99" +"checksum num_cpus 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "a18c392466409c50b87369414a2680c93e739aedeb498eb2bff7d7eb569744e2" +"checksum openssl 0.9.10 (registry+https://github.com/rust-lang/crates.io-index)" = "d8aa0eb7aad44f0da6f7dda13ddb4559d91a0f40cfab150b1f76ad5b39ec523f" +"checksum openssl-probe 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "d98df0270d404ccd3c050a41d579c52d1db15375168bb3471e04ec0f5f378daf" +"checksum openssl-sys 0.9.10 (registry+https://github.com/rust-lang/crates.io-index)" = "14f5bfd12054d764510b887152d564ba11d99ae24ea7d740781778f646620576" +"checksum pkg-config 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)" = "3a8b4c6b8165cd1a1cd4b9b120978131389f64bdaf456435caa41e630edba903" "checksum psapi-sys 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "abcd5d1a07d360e29727f757a9decb3ce8bc6e0efa8969cfaad669a8317a2478" -"checksum rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)" = "2791d88c6defac799c3f20d74f094ca33b9332612d9aef9078519c82e4fe04a5" +"checksum quote 0.3.15 (registry+https://github.com/rust-lang/crates.io-index)" = "7a6e920b65c65f10b2ae65c831a81a073a89edd28c7cce89475bff467ab4167a" +"checksum rand 0.3.15 (registry+https://github.com/rust-lang/crates.io-index)" = "022e0636ec2519ddae48154b028864bdce4eaf7d35226ab8e65c611be97b189d" +"checksum redox_syscall 0.1.17 (registry+https://github.com/rust-lang/crates.io-index)" = "29dbdfd4b9df8ab31dec47c6087b7b13cbf4a776f335e4de8efba8288dda075b" "checksum regex 0.1.80 (registry+https://github.com/rust-lang/crates.io-index)" = "4fd4ace6a8cf7860714a2c2280d6c1f7e6a413486c13298bbc86fd3da019402f" +"checksum regex 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "4278c17d0f6d62dfef0ab00028feb45bd7d2102843f80763474eeb1be8a10c01" "checksum regex-syntax 0.3.9 
(registry+https://github.com/rust-lang/crates.io-index)" = "f9ec002c35e86791825ed294b50008eea9ddfc8def4420124fbc6b08db834957" -"checksum rustc-serialize 0.3.21 (registry+https://github.com/rust-lang/crates.io-index)" = "bff9fc1c79f2dec76b253273d07682e94a978bd8f132ded071188122b2af9818" -"checksum semver 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)" = "ae2ff60ecdb19c255841c066cbfa5f8c2a4ada1eb3ae47c77ab6667128da71f5" -"checksum semver-parser 0.6.1 (registry+https://github.com/rust-lang/crates.io-index)" = "e88e43a5a74dd2a11707f9c21dfd4a423c66bd871df813227bb0a3e78f3a1ae9" +"checksum regex-syntax 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "2f9191b1f57603095f105d317e375d19b1c9c5c3185ea9633a99a6dcbed04457" +"checksum rustc-serialize 0.3.23 (registry+https://github.com/rust-lang/crates.io-index)" = "684ce48436d6465300c9ea783b6b14c4361d6b8dcbb1375b486a69cc19e2dfb0" +"checksum semver 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)" = "7a3186ec9e65071a2095434b1f5bb24838d4e8e130f584c790f6033c79943537" +"checksum semver-parser 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3" +"checksum serde 0.9.12 (registry+https://github.com/rust-lang/crates.io-index)" = "f023838e7e1878c679322dc7f66c3648bd33763a215fad752f378a623856898d" +"checksum serde_codegen_internals 0.14.2 (registry+https://github.com/rust-lang/crates.io-index)" = "bc888bd283bd2420b16ad0d860e35ad8acb21941180a83a189bb2046f9d00400" +"checksum serde_derive 0.9.12 (registry+https://github.com/rust-lang/crates.io-index)" = "ebb753639f6d55ba1acbcd330ccaf4d9f5862353ac2851e43eac63c2a5343a11" +"checksum serde_ignored 0.0.2 (registry+https://github.com/rust-lang/crates.io-index)" = "d4b3f5576874721d14690657e9f0ed286e72a52be2f6fdc0cf2f024182bd8f64" +"checksum serde_json 0.9.9 (registry+https://github.com/rust-lang/crates.io-index)" = 
"dbc45439552eb8fb86907a2c41c1fd0ef97458efb87ff7f878db466eb581824e" "checksum shell-escape 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "dd5cc96481d54583947bfe88bf30c23d53f883c6cd0145368b69989d97b84ef8" -"checksum strsim 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)" = "50c069df92e4b01425a8bf3576d5d417943a6a7272fbabaf5bd80b1aaa76442e" -"checksum tar 0.4.9 (registry+https://github.com/rust-lang/crates.io-index)" = "0c9048e27119ff1fcf5b0e147ca0936d911b607d87440b042d4ecaa111b523ee" +"checksum strsim 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)" = "b4d15c810519a91cf877e7e36e63fe068815c678181439f2f29e2562147c3694" +"checksum syn 0.11.9 (registry+https://github.com/rust-lang/crates.io-index)" = "480c834701caba3548aa991e54677281be3a5414a9d09ddbdf4ed74a569a9d19" +"checksum synom 0.11.3 (registry+https://github.com/rust-lang/crates.io-index)" = "a393066ed9010ebaed60b9eafa373d4b1baac186dd7e008555b0f702b51945b6" +"checksum tar 0.4.10 (registry+https://github.com/rust-lang/crates.io-index)" = "1eb3bf6ec92843ca93f4fcfb5fc6dfe30534815b147885db4b5759b8e2ff7d52" "checksum tempdir 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)" = "87974a6f5c1dfb344d733055601650059a3363de2a6104819293baff662132d6" -"checksum term 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)" = "3deff8a2b3b6607d6d7cc32ac25c0b33709453ca9cceac006caac51e963cf94a" +"checksum term 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)" = "d168af3930b369cfe245132550579d47dfd873d69470755a19c2c6568dbbd989" "checksum thread-id 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "a9539db560102d1cef46b8b78ce737ff0bb64e7e18d35b2a5688f7d097d0ff03" +"checksum thread-id 3.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "4437c97558c70d129e40629a5b385b3fb1ffac301e63941335e4d354081ec14a" "checksum thread_local 0.2.7 (registry+https://github.com/rust-lang/crates.io-index)" = 
"8576dbbfcaef9641452d5cf0df9b0e7eeab7694956dd33bb61515fb8f18cfdd5" -"checksum toml 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "736b60249cb25337bc196faa43ee12c705e426f3d55c214d73a4e7be06f92cb4" -"checksum unicode-bidi 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "c1f7ceb96afdfeedee42bade65a0d585a6a0106f681b6749c8ff4daa8df30b3f" -"checksum unicode-normalization 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "26643a2f83bac55f1976fb716c10234485f9202dcd65cfbdf9da49867b271172" -"checksum url 1.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "48ccf7bd87a81b769cf84ad556e034541fb90e1cd6d4bc375c822ed9500cd9d7" +"checksum thread_local 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "c85048c6260d17cf486ceae3282d9fb6b90be220bf5b28c400f5485ffc29f0c7" +"checksum time 0.1.36 (registry+https://github.com/rust-lang/crates.io-index)" = "211b63c112206356ef1ff9b19355f43740fc3f85960c598a93d3a3d3ba7beade" +"checksum toml 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)" = "bd86ad9ebee246fdedd610e0f6d0587b754a3d81438db930a244d0480ed7878f" +"checksum unicode-bidi 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)" = "d3a078ebdd62c0e71a709c3d53d2af693fe09fe93fbff8344aebe289b78f9032" +"checksum unicode-normalization 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)" = "e28fa37426fceeb5cf8f41ee273faa7c82c47dc8fba5853402841e665fcd86ff" +"checksum unicode-xid 0.0.4 (registry+https://github.com/rust-lang/crates.io-index)" = "8c1f860d7d29cf02cb2f3f359fd35991af3d30bac52c57d265a3c461074cb4dc" +"checksum unreachable 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "1f2ae5ddb18e1c92664717616dd9549dde73f539f01bd7b77c2edb2446bdff91" +"checksum url 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "f5ba8a749fb4479b043733416c244fa9d1d3af3d7c23804944651c8a448cb87e" "checksum user32-sys 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = 
"4ef4711d107b21b410a3a974b1204d9accc8b10dad75d8324b5d755de1617d47" "checksum utf8-ranges 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "a1ca13c08c41c9c3e04224ed9ff80461d97e121589ff27c753a16cb10830ae0f" +"checksum utf8-ranges 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "662fab6525a98beff2921d7f61a39e7d59e0b425ebc7d0d9e66d316e55124122" +"checksum void 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)" = "6a02e4885ed3bc0f2de90ea6dd45ebcbb66dacffe03547fadbb0eeae2770887d" "checksum winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)" = "167dc9d6949a9b857f3451275e911c3f44255842c1f7a76f33c55103a909087a" "checksum winapi-build 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "2d315eee3b34aca4797b2da6b13ed88266e6d612562a0c46390af8299fc699bc" "checksum ws2_32-sys 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "d59cefebd0c892fa2dd6de581e937301d8552cb44489cdff035c6187cb63fa5e" diff -Nru cargo-0.17.0/Cargo.toml cargo-0.19.0/Cargo.toml --- cargo-0.17.0/Cargo.toml 2017-03-03 19:21:26.000000000 +0000 +++ cargo-0.19.0/Cargo.toml 2017-05-16 03:23:10.000000000 +0000 @@ -1,13 +1,13 @@ [package] name = "cargo" -version = "0.17.0" +version = "0.19.0" authors = ["Yehuda Katz ", "Carl Lerche ", "Alex Crichton "] license = "MIT/Apache-2.0" homepage = "https://crates.io" repository = "https://github.com/rust-lang/cargo" -documentation = "http://doc.crates.io/cargo" +documentation = "https://docs.rs/cargo" description = """ Cargo, a package manager for Rust. 
""" @@ -17,14 +17,15 @@ path = "src/cargo/lib.rs" [dependencies] -crates-io = { path = "src/crates-io", version = "0.6" } +chrono = "0.2.25" +crates-io = { path = "src/crates-io", version = "0.8" } crossbeam = "0.2" -curl = "0.4" -docopt = "0.6" -env_logger = "0.3" +curl = "0.4.6" +docopt = "0.7" +env_logger = "0.4" filetime = "0.1" flate2 = "0.2" -fs2 = "0.3" +fs2 = "0.4" git2 = "0.6" git2-curl = "0.7" glob = "0.2" @@ -32,14 +33,17 @@ libgit2-sys = "0.6" log = "0.3" num_cpus = "1.0" -regex = "0.1" rustc-serialize = "0.3" -semver = "0.5.0" +semver = "0.6.0" +serde = "0.9" +serde_derive = "0.9" +serde_json = "0.9" +serde_ignored = "0.0.2" shell-escape = "0.1" tar = { version = "0.4", default-features = false } tempdir = "0.3" term = "0.4.4" -toml = "0.2" +toml = "0.3" url = "1.1" [target.'cfg(unix)'.dependencies] @@ -48,12 +52,12 @@ [target.'cfg(windows)'.dependencies] advapi32-sys = "0.2" kernel32-sys = "0.2" -miow = "0.1" +miow = "0.2" psapi-sys = "0.1" winapi = "0.2" [dev-dependencies] -hamcrest = "0.1" +hamcrest = "=0.1.1" bufstream = "0.1" filetime = "0.1" cargotest = { path = "tests/cargotest" } diff -Nru cargo-0.17.0/debian/changelog cargo-0.19.0/debian/changelog --- cargo-0.17.0/debian/changelog 2017-03-24 17:03:37.000000000 +0000 +++ cargo-0.19.0/debian/changelog 2017-08-16 09:09:12.000000000 +0000 @@ -1,3 +1,11 @@ +cargo (0.19.0-0ubuntu1) artful; urgency=medium + + * Update to 0.19.0 + + * Refresh debian/patches/clean-cargo-deps.patch + + -- Chris Coulson Wed, 16 Aug 2017 10:09:12 +0100 + cargo (0.17.0-0ubuntu1) zesty; urgency=medium * Update to 0.17.0, which is required to work with rustc 1.16 diff -Nru cargo-0.17.0/debian/patches/clean-cargo-deps.patch cargo-0.19.0/debian/patches/clean-cargo-deps.patch --- cargo-0.17.0/debian/patches/clean-cargo-deps.patch 2017-03-24 17:03:11.000000000 +0000 +++ cargo-0.19.0/debian/patches/clean-cargo-deps.patch 2017-08-16 09:09:04.000000000 +0000 @@ -6,20 +6,20 @@ Forwarded: not-needed --- a/Cargo.toml +++ b/Cargo.toml 
-@@ -52,11 +52,11 @@ +@@ -56,11 +56,11 @@ miow = "0.2" psapi-sys = "0.1" winapi = "0.2" - + -[dev-dependencies] --hamcrest = "0.1" +-hamcrest = "=0.1.1" -bufstream = "0.1" -filetime = "0.1" -cargotest = { path = "tests/cargotest" } +#[dev-dependencies] -+#hamcrest = "0.1" ++#hamcrest = "=0.1.1" +#bufstream = "0.1" +#filetime = "0.1" +#cargotest = { path = "tests/cargotest" } - + [[bin]] name = "cargo" diff -Nru cargo-0.17.0/Makefile.in cargo-0.19.0/Makefile.in --- cargo-0.17.0/Makefile.in 2017-03-03 19:21:26.000000000 +0000 +++ cargo-0.19.0/Makefile.in 2017-05-16 03:23:10.000000000 +0000 @@ -1,4 +1,4 @@ -CFG_RELEASE_NUM=0.17.0 +CFG_RELEASE_NUM=0.18.0 CFG_RELEASE_LABEL= OPENSSL_VERS=1.0.2k @@ -160,7 +160,7 @@ DOCS := index faq config guide manifest build-script pkgid-spec crates-io \ environment-variables specifying-dependencies source-replacement \ - policies machine-readable-output + policies external-tools DOC_DIR := target/doc DOC_OPTS := --markdown-no-toc \ --markdown-css stylesheets/normalize.css \ @@ -195,11 +195,15 @@ @mkdir -p $(@D) cp $< $@ +OPENSSL_OS_aarch64-linux-android := linux-aarch64 OPENSSL_OS_aarch64-unknown-linux-gnu := linux-aarch64 +OPENSSL_OS_arm-linux-androideabi := android OPENSSL_OS_arm-unknown-linux-gnueabi := linux-armv4 OPENSSL_OS_arm-unknown-linux-gnueabihf := linux-armv4 +OPENSSL_OS_armv7-linux-androideabi := android-armv7 OPENSSL_OS_armv7-unknown-linux-gnueabihf := linux-armv4 OPENSSL_OS_i686-apple-darwin := darwin-i386-cc +OPENSSL_OS_i686-linux-android := android-x86 OPENSSL_OS_i686-unknown-freebsd := BSD-x86-elf OPENSSL_OS_i686-unknown-linux-gnu := linux-elf OPENSSL_OS_i686-unknown-linux-musl := linux-elf @@ -217,11 +221,15 @@ OPENSSL_OS_x86_64-unknown-linux-musl := linux-x86_64 OPENSSL_OS_x86_64-unknown-netbsd := BSD-x86_64 +OPENSSL_AR_aarch64-linux-android := aarch64-linux-android-ar OPENSSL_AR_aarch64-unknown-linux-gnu := aarch64-linux-gnu-ar +OPENSSL_AR_arm-linux-androideabi := arm-linux-androideabi-ar 
OPENSSL_AR_arm-unknown-linux-gnueabi := arm-linux-gnueabi-ar OPENSSL_AR_arm-unknown-linux-gnueabihf := arm-linux-gnueabihf-ar +OPENSSL_AR_armv7-linux-androideabi := arm-linux-androideabi-ar OPENSSL_AR_armv7-unknown-linux-gnueabihf := armv7-linux-gnueabihf-ar OPENSSL_AR_i686-apple-darwin := ar +OPENSSL_AR_i686-linux-android := i686-linux-android-ar OPENSSL_AR_i686-unknown-freebsd := i686-unknown-freebsd10-ar OPENSSL_AR_i686-unknown-linux-gnu := ar OPENSSL_AR_i686-unknown-linux-musl := ar @@ -238,11 +246,16 @@ OPENSSL_AR_x86_64-unknown-linux-gnu := ar OPENSSL_AR_x86_64-unknown-linux-musl := ar OPENSSL_AR_x86_64-unknown-netbsd := x86_64-unknown-netbsd-ar + +OPENSSL_CC_aarch64-linux-android := aarch64-linux-android-gcc OPENSSL_CC_aarch64-unknown-linux-gnu := aarch64-linux-gnu-gcc +OPENSSL_CC_arm-linux-androideabi := arm-linux-androideabi-gcc OPENSSL_CC_arm-unknown-linux-gnueabi := arm-linux-gnueabi-gcc OPENSSL_CC_arm-unknown-linux-gnueabihf := arm-linux-gnueabihf-gcc OPENSSL_CC_armv7-unknown-linux-gnueabihf := armv7-linux-gnueabihf-gcc +OPENSSL_CC_armv7-linux-androideabi := arm-linux-androideabi-gcc OPENSSL_CC_i686-apple-darwin := clang +OPENSSL_CC_i686-linux-android := i686-linux-android-gcc OPENSSL_CC_i686-unknown-freebsd := i686-unknown-freebsd10-gcc OPENSSL_CC_i686-unknown-linux-gnu := gcc OPENSSL_CC_i686-unknown-linux-musl := musl-gcc @@ -261,6 +274,8 @@ OPENSSL_CC_x86_64-unknown-netbsd := x86_64-unknown-netbsd-gcc SETARCH_i686-unknown-linux-gnu := setarch i386 + +OPENSSL_CFLAGS_aarch64-linux-android := "-mandroid -fomit-frame-pointer" OPENSSL_CFLAGS_i686-apple-darwin := -m32 OPENSSL_CFLAGS_i686-unknown-linux-gnu := -m32 OPENSSL_CFLAGS_i686-unknown-linux-musl := -m32 diff -Nru cargo-0.17.0/README.md cargo-0.19.0/README.md --- cargo-0.17.0/README.md 2017-03-03 19:21:26.000000000 +0000 +++ cargo-0.19.0/README.md 2017-05-16 03:23:10.000000000 +0000 @@ -4,7 +4,7 @@ ## Code Status [![Build 
Status](https://travis-ci.org/rust-lang/cargo.svg?branch=master)](https://travis-ci.org/rust-lang/cargo) -[![Build Status](https://ci.appveyor.com/api/projects/status/jnh54531mpidb2c2?svg=true)](https://ci.appveyor.com/project/alexcrichton/cargo) +[![Build Status](https://ci.appveyor.com/api/projects/status/github/rust-lang/cargo?branch=master&svg=true)](https://ci.appveyor.com/project/rust-lang-libs/cargo) ## Installing Cargo diff -Nru cargo-0.17.0/src/bin/cargo.rs cargo-0.19.0/src/bin/cargo.rs --- cargo-0.17.0/src/bin/cargo.rs 2017-03-03 19:21:26.000000000 +0000 +++ cargo-0.19.0/src/bin/cargo.rs 2017-05-16 03:23:10.000000000 +0000 @@ -4,13 +4,17 @@ extern crate git2_curl; extern crate rustc_serialize; extern crate toml; -#[macro_use] extern crate log; +#[macro_use] +extern crate log; +#[macro_use] +extern crate serde_derive; +extern crate serde_json; use std::collections::BTreeSet; use std::collections::HashMap; use std::env; use std::fs; -use std::path::{Path,PathBuf}; +use std::path::{Path, PathBuf}; use cargo::core::shell::{Verbosity, ColorConfig}; use cargo::util::{self, CliResult, lev_distance, Config, human, CargoResult}; @@ -73,23 +77,23 @@ Ok(cfg) => cfg, Err(e) => { let mut shell = cargo::shell(Verbosity::Verbose, ColorConfig::Auto); - cargo::handle_cli_error(e.into(), &mut shell) + cargo::exit_with_error(e.into(), &mut shell) } }; let result = (|| { - let args: Vec<_> = try!(env::args_os().map(|s| { - s.into_string().map_err(|s| { - human(format!("invalid unicode in argument: {:?}", s)) + let args: Vec<_> = try!(env::args_os() + .map(|s| { + s.into_string().map_err(|s| human(format!("invalid unicode in argument: {:?}", s))) }) - }).collect()); + .collect()); let rest = &args; cargo::call_main_without_stdin(execute, &config, USAGE, rest, true) })(); match result { - Err(e) => cargo::handle_cli_error(e, &mut *config.shell()), - Ok(()) => {}, + Err(e) => cargo::exit_with_error(e, &mut *config.shell()), + Ok(()) => {} } } @@ -140,10 +144,10 @@ */ fn 
execute(flags: Flags, config: &Config) -> CliResult { config.configure(flags.flag_verbose, - flags.flag_quiet, - &flags.flag_color, - flags.flag_frozen, - flags.flag_locked)?; + flags.flag_quiet, + &flags.flag_color, + flags.flag_frozen, + flags.flag_locked)?; init_git_transports(config); let _token = cargo::util::job::setup(); @@ -151,9 +155,11 @@ if flags.flag_version { let version = cargo::version(); println!("{}", version); - if flags.flag_verbose > 0{ + if flags.flag_verbose > 0 { println!("release: {}.{}.{}", - version.major, version.minor, version.patch); + version.major, + version.minor, + version.patch); if let Some(ref cfg) = version.cfg_info { if let Some(ref ci) = cfg.commit_info { println!("commit-hash: {}", ci.commit_hash); @@ -161,21 +167,21 @@ } } } - return Ok(()) + return Ok(()); } if flags.flag_list { println!("Installed Commands:"); for command in list_commands(config) { println!(" {}", command); - }; - return Ok(()) + } + return Ok(()); } if let Some(ref code) = flags.flag_explain { let mut procss = config.rustc()?.process(); procss.arg("--explain").arg(code).exec().map_err(human)?; - return Ok(()) + return Ok(()); } let args = match &flags.arg_command[..] 
{ @@ -185,23 +191,18 @@ "" | "help" if flags.arg_args.is_empty() => { config.shell().set_verbosity(Verbosity::Verbose); let args = &["cargo".to_string(), "-h".to_string()]; - let r = cargo::call_main_without_stdin(execute, config, USAGE, args, - false); - cargo::process_executed(r, &mut config.shell()); - return Ok(()) + return cargo::call_main_without_stdin(execute, config, USAGE, args, false); } // For `cargo help -h` and `cargo help --help`, print out the help // message for `cargo help` - "help" if flags.arg_args[0] == "-h" || - flags.arg_args[0] == "--help" => { + "help" if flags.arg_args[0] == "-h" || flags.arg_args[0] == "--help" => { vec!["cargo".to_string(), "help".to_string(), "-h".to_string()] } // For `cargo help foo`, print out the usage message for the specified // subcommand by executing the command with the `-h` flag. - "help" => vec!["cargo".to_string(), flags.arg_args[0].clone(), - "-h".to_string()], + "help" => vec!["cargo".to_string(), flags.arg_args[0].clone(), "-h".to_string()], // For all other invocations, we're of the form `cargo foo args...`. 
We // use the exact environment arguments to preserve tokens like `--` for @@ -212,38 +213,39 @@ default_alias.insert("t", "test".to_string()); default_alias.insert("r", "run".to_string()); let mut args: Vec = env::args().collect(); - if let Some(new_command) = default_alias.get(&args[1][..]){ + if let Some(new_command) = default_alias.get(&args[1][..]) { args[1] = new_command.clone(); } args } }; - if try_execute(&config, &args) { - return Ok(()) + if let Some(r) = try_execute_builtin_command(&config, &args) { + return r; } let alias_list = aliased_command(&config, &args[1])?; let args = match alias_list { Some(alias_command) => { - let chain = args.iter().take(1) + let chain = args.iter() + .take(1) .chain(alias_command.iter()) .chain(args.iter().skip(2)) .map(|s| s.to_string()) .collect::>(); - if try_execute(&config, &chain) { - return Ok(()) + if let Some(r) = try_execute_builtin_command(&config, &chain) { + return r; } else { chain } } None => args, }; - execute_subcommand(config, &args[1], &args)?; - Ok(()) + + execute_external_subcommand(config, &args[1], &args) } -fn try_execute(config: &Config, args: &[String]) -> bool { +fn try_execute_builtin_command(config: &Config, args: &[String]) -> Option { macro_rules! 
cmd { ($name:ident) => (if args[1] == stringify!($name).replace("_", "-") { config.shell().set_verbosity(Verbosity::Verbose); @@ -251,13 +253,12 @@ $name::USAGE, &args, false); - cargo::process_executed(r, &mut config.shell()); - return true + return Some(r); }) } each_subcommand!(cmd); - return false + None } fn aliased_command(config: &Config, command: &String) -> CargoResult>> { @@ -266,17 +267,20 @@ match config.get_string(&alias_name) { Ok(value) => { if let Some(record) = value { - let alias_commands = record.val.split_whitespace() - .map(|s| s.to_string()) - .collect(); + let alias_commands = record.val + .split_whitespace() + .map(|s| s.to_string()) + .collect(); result = Ok(Some(alias_commands)); } - }, + } Err(_) => { let value = config.get_list(&alias_name)?; if let Some(record) = value { - let alias_commands: Vec = record.val.iter() - .map(|s| s.0.to_string()).collect(); + let alias_commands: Vec = record.val + .iter() + .map(|s| s.0.to_string()) + .collect(); result = Ok(Some(alias_commands)); } } @@ -288,32 +292,40 @@ let cmds = list_commands(config); // Only consider candidates with a lev_distance of 3 or less so we don't // suggest out-of-the-blue options. 
- let mut filtered = cmds.iter().map(|c| (lev_distance(&c, cmd), c)) - .filter(|&(d, _)| d < 4) - .collect::>(); + let mut filtered = cmds.iter() + .map(|c| (lev_distance(&c, cmd), c)) + .filter(|&(d, _)| d < 4) + .collect::>(); filtered.sort_by(|a, b| a.0.cmp(&b.0)); filtered.get(0).map(|slot| slot.1.clone()) } -fn execute_subcommand(config: &Config, - cmd: &str, - args: &[String]) -> CliResult { +fn execute_external_subcommand(config: &Config, cmd: &str, args: &[String]) -> CliResult { let command_exe = format!("cargo-{}{}", cmd, env::consts::EXE_SUFFIX); let path = search_directories(config) - .iter() - .map(|dir| dir.join(&command_exe)) - .find(|file| is_executable(file)); + .iter() + .map(|dir| dir.join(&command_exe)) + .find(|file| is_executable(file)); let command = match path { Some(command) => command, None => { return Err(human(match find_closest(config, cmd) { - Some(closest) => format!("no such subcommand: `{}`\n\n\t\ - Did you mean `{}`?\n", cmd, closest), - None => format!("no such subcommand: `{}`", cmd) - }).into()) + Some(closest) => { + format!("no such subcommand: `{}`\n\n\tDid you mean `{}`?\n", + cmd, + closest) + } + None => format!("no such subcommand: `{}`", cmd), + }) + .into()) } }; - let err = match util::process(&command).args(&args[1..]).exec() { + + let cargo_exe = config.cargo_exe()?; + let err = match util::process(&command) + .env(cargo::CARGO_ENV, cargo_exe) + .args(&args[1..]) + .exec() { Ok(()) => return Ok(()), Err(e) => e, }; @@ -333,16 +345,16 @@ for dir in search_directories(config) { let entries = match fs::read_dir(dir) { Ok(entries) => entries, - _ => continue + _ => continue, }; for entry in entries.filter_map(|e| e.ok()) { let path = entry.path(); let filename = match path.file_name().and_then(|s| s.to_str()) { Some(filename) => filename, - _ => continue + _ => continue, }; if !filename.starts_with(prefix) || !filename.ends_with(suffix) { - continue + continue; } if is_executable(entry.path()) { let end = filename.len() 
- suffix.len(); @@ -361,9 +373,9 @@ #[cfg(unix)] fn is_executable>(path: P) -> bool { use std::os::unix::prelude::*; - fs::metadata(path).map(|metadata| { - metadata.is_file() && metadata.permissions().mode() & 0o111 != 0 - }).unwrap_or(false) + fs::metadata(path) + .map(|metadata| metadata.is_file() && metadata.permissions().mode() & 0o111 != 0) + .unwrap_or(false) } #[cfg(windows)] fn is_executable>(path: P) -> bool { @@ -384,7 +396,7 @@ // case. The custom transport, however, is not as well battle-tested. match cargo::ops::http_proxy_exists(config) { Ok(true) => {} - _ => return + _ => return, } let handle = match cargo::ops::http_handle(config) { diff -Nru cargo-0.17.0/src/bin/check.rs cargo-0.19.0/src/bin/check.rs --- cargo-0.17.0/src/bin/check.rs 2017-03-03 19:21:26.000000000 +0000 +++ cargo-0.19.0/src/bin/check.rs 2017-05-16 03:23:10.000000000 +0000 @@ -1,7 +1,7 @@ use std::env; use cargo::core::Workspace; -use cargo::ops::{self, CompileOptions, MessageFormat}; +use cargo::ops::{self, CompileOptions, MessageFormat, Packages}; use cargo::util::{CliResult, Config}; use cargo::util::important_paths::find_root_manifest_for_wd; @@ -13,7 +13,8 @@ Options: -h, --help Print this message - -p SPEC, --package SPEC ... Package to check + -p SPEC, --package SPEC ... 
Package(s) to check + --all Check all packages in the workspace -j N, --jobs N Number of parallel jobs, defaults to # of CPUs --lib Check only this package's library --bin NAME Check only the specified binary @@ -64,6 +65,7 @@ flag_bench: Vec, flag_locked: bool, flag_frozen: bool, + flag_all: bool, } pub fn execute(options: Options, config: &Config) -> CliResult { @@ -79,6 +81,12 @@ let root = find_root_manifest_for_wd(options.flag_manifest_path, config.cwd())?; let ws = Workspace::new(&root, config)?; + let spec = if options.flag_all { + Packages::All + } else { + Packages::Packages(&options.flag_package) + }; + let opts = CompileOptions { config: config, jobs: options.flag_jobs, @@ -86,7 +94,7 @@ features: &options.flag_features, all_features: options.flag_all_features, no_default_features: options.flag_no_default_features, - spec: ops::Packages::Packages(&options.flag_package), + spec: spec, mode: ops::CompileMode::Check, release: options.flag_release, filter: ops::CompileFilter::new(options.flag_lib, diff -Nru cargo-0.17.0/src/bin/init.rs cargo-0.19.0/src/bin/init.rs --- cargo-0.17.0/src/bin/init.rs 2017-03-03 19:21:26.000000000 +0000 +++ cargo-0.19.0/src/bin/init.rs 2017-05-16 03:23:10.000000000 +0000 @@ -30,7 +30,7 @@ control system (git or hg) or do not initialize any version control at all (none) overriding a global configuration. --bin Use a binary (application) template - --lib Use a library template + --lib Use a library template [default] --name NAME Set the resulting package name -v, --verbose ... 
Use verbose output (-vv very verbose/build.rs output) -q, --quiet No output printed to stdout diff -Nru cargo-0.17.0/src/bin/locate_project.rs cargo-0.19.0/src/bin/locate_project.rs --- cargo-0.17.0/src/bin/locate_project.rs 2017-03-03 19:21:26.000000000 +0000 +++ cargo-0.19.0/src/bin/locate_project.rs 2017-05-16 03:23:10.000000000 +0000 @@ -18,7 +18,7 @@ -h, --help Print this message "; -#[derive(RustcEncodable)] +#[derive(Serialize)] pub struct ProjectLocation { root: String } diff -Nru cargo-0.17.0/src/bin/metadata.rs cargo-0.19.0/src/bin/metadata.rs --- cargo-0.17.0/src/bin/metadata.rs 2017-03-03 19:21:26.000000000 +0000 +++ cargo-0.19.0/src/bin/metadata.rs 2017-05-16 03:23:10.000000000 +0000 @@ -9,7 +9,7 @@ flag_color: Option, flag_features: Vec, flag_all_features: bool, - flag_format_version: u32, + flag_format_version: Option, flag_manifest_path: Option, flag_no_default_features: bool, flag_no_deps: bool, @@ -34,7 +34,7 @@ --no-deps Output information only about the root package and don't fetch dependencies. --manifest-path PATH Path to the manifest - --format-version VERSION Format version [default: 1] + --format-version VERSION Format version Valid values: 1 -v, --verbose ... Use verbose output (-vv very verbose/build.rs output) -q, --quiet No output printed to stdout @@ -51,12 +51,17 @@ options.flag_locked)?; let manifest = find_root_manifest_for_wd(options.flag_manifest_path, config.cwd())?; + if options.flag_format_version.is_none() { + config.shell().warn("please specify `--format-version` flag explicitly to \ + avoid compatibility problems")? 
+ } + let options = OutputMetadataOptions { features: options.flag_features, all_features: options.flag_all_features, no_default_features: options.flag_no_default_features, no_deps: options.flag_no_deps, - version: options.flag_format_version, + version: options.flag_format_version.unwrap_or(1), }; let ws = Workspace::new(&manifest, config)?; diff -Nru cargo-0.17.0/src/bin/new.rs cargo-0.19.0/src/bin/new.rs --- cargo-0.17.0/src/bin/new.rs 2017-03-03 19:21:26.000000000 +0000 +++ cargo-0.19.0/src/bin/new.rs 2017-05-16 03:23:10.000000000 +0000 @@ -27,10 +27,10 @@ Options: -h, --help Print this message --vcs VCS Initialize a new repository for the given version - control system (git or hg) or do not initialize any version + control system (git, hg, or pijul) or do not initialize any version control at all (none) overriding a global configuration. --bin Use a binary (application) template - --lib Use a library template + --lib Use a library template [default] --name NAME Set the resulting package name, defaults to the value of -v, --verbose ... Use verbose output (-vv very verbose/build.rs output) -q, --quiet No output printed to stdout diff -Nru cargo-0.17.0/src/bin/run.rs cargo-0.19.0/src/bin/run.rs --- cargo-0.17.0/src/bin/run.rs 2017-03-03 19:21:26.000000000 +0000 +++ cargo-0.19.0/src/bin/run.rs 2017-05-16 03:23:10.000000000 +0000 @@ -1,3 +1,5 @@ +use std::iter::FromIterator; + use cargo::core::Workspace; use cargo::ops::{self, MessageFormat, Packages}; use cargo::util::{CliResult, CliError, Config, Human}; @@ -7,6 +9,7 @@ pub struct Options { flag_bin: Option, flag_example: Option, + flag_package: Option, flag_jobs: Option, flag_features: Vec, flag_all_features: bool, @@ -30,22 +33,23 @@ cargo run [options] [--] [...] 
Options: - -h, --help Print this message - --bin NAME Name of the bin target to run - --example NAME Name of the example target to run - -j N, --jobs N Number of parallel jobs, defaults to # of CPUs - --release Build artifacts in release mode, with optimizations - --features FEATURES Space-separated list of features to also build - --all-features Build all available features - --no-default-features Do not build the `default` feature - --target TRIPLE Build for the target triple - --manifest-path PATH Path to the manifest to execute - -v, --verbose ... Use verbose output (-vv very verbose/build.rs output) - -q, --quiet No output printed to stdout - --color WHEN Coloring: auto, always, never - --message-format FMT Error format: human, json [default: human] - --frozen Require Cargo.lock and cache are up to date - --locked Require Cargo.lock is up to date + -h, --help Print this message + --bin NAME Name of the bin target to run + --example NAME Name of the example target to run + -p SPEC, --package SPEC Package with the target to run + -j N, --jobs N Number of parallel jobs, defaults to # of CPUs + --release Build artifacts in release mode, with optimizations + --features FEATURES Space-separated list of features to also build + --all-features Build all available features + --no-default-features Do not build the `default` feature + --target TRIPLE Build for the target triple + --manifest-path PATH Path to the manifest to execute + -v, --verbose ... Use verbose output (-vv very verbose/build.rs output) + -q, --quiet No output printed to stdout + --color WHEN Coloring: auto, always, never + --message-format FMT Error format: human, json [default: human] + --frozen Require Cargo.lock and cache are up to date + --locked Require Cargo.lock is up to date If neither `--bin` nor `--example` are given, then if the project only has one bin target it will be run. 
Otherwise `--bin` specifies the bin target to run, @@ -74,6 +78,9 @@ examples.push(s); } + let packages = Vec::from_iter(options.flag_package.iter().cloned()); + let spec = Packages::Packages(&packages); + let compile_opts = ops::CompileOptions { config: config, jobs: options.flag_jobs, @@ -81,11 +88,11 @@ features: &options.flag_features, all_features: options.flag_all_features, no_default_features: options.flag_no_default_features, - spec: Packages::Packages(&[]), + spec: spec, release: options.flag_release, mode: ops::CompileMode::Build, filter: if examples.is_empty() && bins.is_empty() { - ops::CompileFilter::Everything + ops::CompileFilter::Everything { required_features_filterable: false, } } else { ops::CompileFilter::Only { lib: false, tests: &[], benches: &[], diff -Nru cargo-0.17.0/src/bin/test.rs cargo-0.19.0/src/bin/test.rs --- cargo-0.17.0/src/bin/test.rs 2017-03-03 19:21:26.000000000 +0000 +++ cargo-0.19.0/src/bin/test.rs 2017-05-16 03:23:10.000000000 +0000 @@ -41,10 +41,10 @@ -h, --help Print this message --lib Test only this package's library --doc Test only this library's documentation - --bin NAME Test only the specified binary - --example NAME Test only the specified example - --test NAME Test only the specified integration test target - --bench NAME Test only the specified benchmark target + --bin NAME ... Test only the specified binaries + --example NAME ... Check that the specified examples compile + --test NAME ... Test only the specified integration test targets + --bench NAME ... Test only the specified benchmark targets --no-run Compile, but don't run tests -p SPEC, --package SPEC ... 
Package to run tests for --all Test all packages in the workspace @@ -149,7 +149,7 @@ None => Ok(()), Some(err) => { Err(match err.exit.as_ref().and_then(|e| e.code()) { - Some(i) => CliError::new(human("test failed"), i), + Some(i) => CliError::new(human(err.hint()), i), None => CliError::new(Box::new(Human(err)), 101), }) } diff -Nru cargo-0.17.0/src/bin/verify_project.rs cargo-0.19.0/src/bin/verify_project.rs --- cargo-0.17.0/src/bin/verify_project.rs 2017-03-03 19:21:26.000000000 +0000 +++ cargo-0.19.0/src/bin/verify_project.rs 2017-05-16 03:23:10.000000000 +0000 @@ -6,7 +6,7 @@ use cargo; use cargo::util::important_paths::{find_root_manifest_for_wd}; use cargo::util::{CliResult, Config}; -use rustc_serialize::json; +use serde_json; use toml; #[derive(RustcDecodable)] @@ -55,10 +55,9 @@ Ok(_) => {}, Err(e) => fail("invalid", &format!("error reading file: {}", e)) }; - match toml::Parser::new(&contents).parse() { - None => fail("invalid", "invalid-format"), - Some(..) => {} - }; + if contents.parse::().is_err() { + fail("invalid", "invalid-format"); + } let mut h = HashMap::new(); h.insert("success".to_string(), "true".to_string()); @@ -69,6 +68,6 @@ fn fail(reason: &str, value: &str) -> ! 
{ let mut h = HashMap::new(); h.insert(reason.to_string(), value.to_string()); - println!("{}", json::encode(&h).unwrap()); + println!("{}", serde_json::to_string(&h).unwrap()); process::exit(1) } diff -Nru cargo-0.17.0/src/cargo/core/dependency.rs cargo-0.19.0/src/cargo/core/dependency.rs --- cargo-0.17.0/src/cargo/core/dependency.rs 2017-03-03 19:21:26.000000000 +0000 +++ cargo-0.19.0/src/cargo/core/dependency.rs 2017-05-16 03:23:10.000000000 +0000 @@ -4,7 +4,7 @@ use semver::VersionReq; use semver::ReqParseError; -use rustc_serialize::{Encoder, Encodable}; +use serde::ser; use core::{SourceId, Summary, PackageId}; use util::{CargoError, CargoResult, Cfg, CfgExpr, ChainError, human, Config}; @@ -41,7 +41,7 @@ Cfg(CfgExpr), } -#[derive(RustcEncodable)] +#[derive(Serialize)] struct SerializedDependency<'a> { name: &'a str, source: &'a SourceId, @@ -54,8 +54,10 @@ target: Option<&'a Platform>, } -impl Encodable for Dependency { - fn encode(&self, s: &mut S) -> Result<(), S::Error> { +impl ser::Serialize for Dependency { + fn serialize(&self, s: S) -> Result + where S: ser::Serializer, + { SerializedDependency { name: self.name(), source: &self.source_id(), @@ -65,7 +67,7 @@ uses_default_features: self.uses_default_features(), features: self.features(), target: self.platform(), - }.encode(s) + }.serialize(s) } } @@ -76,13 +78,15 @@ Build, } -impl Encodable for Kind { - fn encode(&self, s: &mut S) -> Result<(), S::Error> { +impl ser::Serialize for Kind { + fn serialize(&self, s: S) -> Result + where S: ser::Serializer, + { match *self { Kind::Normal => None, Kind::Development => Some("dev"), Kind::Build => Some("build"), - }.encode(s) + }.serialize(s) } } @@ -136,7 +140,7 @@ update to a fixed version or contact the upstream maintainer about this warning. 
", - req, inside.name(), inside.version(), requirement); + req, inside.name(), inside.version(), requirement); config.shell().warn(&msg)?; Ok(requirement) @@ -348,9 +352,11 @@ } } -impl Encodable for Platform { - fn encode(&self, s: &mut S) -> Result<(), S::Error> { - self.to_string().encode(s) +impl ser::Serialize for Platform { + fn serialize(&self, s: S) -> Result + where S: ser::Serializer, + { + self.to_string().serialize(s) } } diff -Nru cargo-0.17.0/src/cargo/core/manifest.rs cargo-0.19.0/src/cargo/core/manifest.rs --- cargo-0.17.0/src/cargo/core/manifest.rs 2017-03-03 19:21:26.000000000 +0000 +++ cargo-0.19.0/src/cargo/core/manifest.rs 2017-05-16 03:23:10.000000000 +0000 @@ -3,7 +3,7 @@ use std::path::{PathBuf, Path}; use semver::Version; -use rustc_serialize::{Encoder, Encodable}; +use serde::ser; use core::{Dependency, PackageId, Summary, SourceId, PackageIdSpec}; use core::WorkspaceConfig; @@ -59,7 +59,7 @@ pub badges: HashMap>, } -#[derive(Debug, Clone, PartialEq, Eq, Hash)] +#[derive(Debug, Clone, PartialEq, Eq, Hash, PartialOrd, Ord)] pub enum LibKind { Lib, Rlib, @@ -101,7 +101,7 @@ } } -#[derive(Debug, Clone, Hash, PartialEq, Eq)] +#[derive(Debug, Clone, Hash, PartialEq, Eq, PartialOrd, Ord)] pub enum TargetKind { Lib(Vec), Bin, @@ -112,58 +112,53 @@ CustomBuild, } -impl Encodable for TargetKind { - fn encode(&self, s: &mut S) -> Result<(), S::Error> { +impl ser::Serialize for TargetKind { + fn serialize(&self, s: S) -> Result + where S: ser::Serializer, + { + use self::TargetKind::*; match *self { - TargetKind::Lib(ref kinds) | - TargetKind::ExampleLib(ref kinds) => { - kinds.iter().map(LibKind::crate_type).collect() - } - TargetKind::Bin => vec!["bin"], - TargetKind::ExampleBin => vec!["example"], - TargetKind::Test => vec!["test"], - TargetKind::CustomBuild => vec!["custom-build"], - TargetKind::Bench => vec!["bench"], - }.encode(s) + Lib(ref kinds) => kinds.iter().map(LibKind::crate_type).collect(), + Bin => vec!["bin"], + ExampleBin | 
ExampleLib(_) => vec!["example"], + Test => vec!["test"], + CustomBuild => vec!["custom-build"], + Bench => vec!["bench"] + }.serialize(s) } } -#[derive(Clone, PartialEq, Eq, Debug, Hash)] + +// Note that most of the fields here are skipped when serializing because we +// don't want to export them just yet (becomes a public API of Cargo). Others +// though are definitely needed! +#[derive(Clone, PartialEq, Eq, Debug, Hash, Serialize)] pub struct Profile { pub opt_level: String, + #[serde(skip_serializing)] pub lto: bool, + #[serde(skip_serializing)] pub codegen_units: Option, // None = use rustc default + #[serde(skip_serializing)] pub rustc_args: Option>, + #[serde(skip_serializing)] pub rustdoc_args: Option>, pub debuginfo: Option, pub debug_assertions: bool, + pub overflow_checks: bool, + #[serde(skip_serializing)] pub rpath: bool, pub test: bool, + #[serde(skip_serializing)] pub doc: bool, + #[serde(skip_serializing)] pub run_custom_build: bool, + #[serde(skip_serializing)] pub check: bool, + #[serde(skip_serializing)] pub panic: Option, } -#[derive(RustcEncodable)] -struct SerializedProfile<'a> { - opt_level: &'a str, - debuginfo: Option, - debug_assertions: bool, - test: bool, -} - -impl Encodable for Profile { - fn encode(&self, s: &mut S) -> Result<(), S::Error> { - SerializedProfile { - opt_level: &self.opt_level, - debuginfo: self.debuginfo, - debug_assertions: self.debug_assertions, - test: self.test, - }.encode(s) - } -} - #[derive(Default, Clone, Debug, PartialEq, Eq)] pub struct Profiles { pub release: Profile, @@ -185,6 +180,7 @@ kind: TargetKind, name: String, src_path: PathBuf, + required_features: Option>, tested: bool, benched: bool, doc: bool, @@ -193,20 +189,26 @@ for_host: bool, } -#[derive(RustcEncodable)] +#[derive(Serialize)] struct SerializedTarget<'a> { + /// Is this a `--bin bin`, `--lib`, `--example ex`? + /// Serialized as a list of strings for historical reasons. 
kind: &'a TargetKind, + /// Corresponds to `--crate-type` compiler attribute. + /// See https://doc.rust-lang.org/reference.html#linkage + crate_types: Vec<&'a str>, name: &'a str, - src_path: &'a str, + src_path: &'a PathBuf, } -impl Encodable for Target { - fn encode(&self, s: &mut S) -> Result<(), S::Error> { +impl ser::Serialize for Target { + fn serialize(&self, s: S) -> Result { SerializedTarget { kind: &self.kind, + crate_types: self.rustc_crate_types(), name: &self.name, - src_path: &self.src_path.display().to_string(), - }.encode(s) + src_path: &self.src_path, + }.serialize(s) } } @@ -305,6 +307,7 @@ kind: TargetKind::Bin, name: String::new(), src_path: src_path, + required_features: None, doc: false, doctest: false, harness: true, @@ -326,10 +329,12 @@ } } - pub fn bin_target(name: &str, src_path: PathBuf) -> Target { + pub fn bin_target(name: &str, src_path: PathBuf, + required_features: Option>) -> Target { Target { kind: TargetKind::Bin, name: name.to_string(), + required_features: required_features, doc: true, ..Target::with_path(src_path) } @@ -349,7 +354,8 @@ pub fn example_target(name: &str, crate_targets: Vec, - src_path: PathBuf) -> Target { + src_path: PathBuf, + required_features: Option>) -> Target { let kind = if crate_targets.is_empty() { TargetKind::ExampleBin } else { @@ -359,24 +365,29 @@ Target { kind: kind, name: name.to_string(), + required_features: required_features, benched: false, ..Target::with_path(src_path) } } - pub fn test_target(name: &str, src_path: PathBuf) -> Target { + pub fn test_target(name: &str, src_path: PathBuf, + required_features: Option>) -> Target { Target { kind: TargetKind::Test, name: name.to_string(), + required_features: required_features, benched: false, ..Target::with_path(src_path) } } - pub fn bench_target(name: &str, src_path: PathBuf) -> Target { + pub fn bench_target(name: &str, src_path: PathBuf, + required_features: Option>) -> Target { Target { kind: TargetKind::Bench, name: name.to_string(), + 
required_features: required_features, tested: false, ..Target::with_path(src_path) } @@ -385,6 +396,7 @@ pub fn name(&self) -> &str { &self.name } pub fn crate_name(&self) -> String { self.name.replace("-", "_") } pub fn src_path(&self) -> &Path { &self.src_path } + pub fn required_features(&self) -> Option<&Vec> { self.required_features.as_ref() } pub fn kind(&self) -> &TargetKind { &self.kind } pub fn tested(&self) -> bool { self.tested } pub fn harness(&self) -> bool { self.harness } @@ -517,6 +529,7 @@ Profile { debuginfo: Some(2), debug_assertions: true, + overflow_checks: true, ..Profile::default() } } @@ -583,6 +596,7 @@ rustdoc_args: None, debuginfo: None, debug_assertions: false, + overflow_checks: false, rpath: false, test: false, doc: false, diff -Nru cargo-0.17.0/src/cargo/core/package_id.rs cargo-0.19.0/src/cargo/core/package_id.rs --- cargo-0.17.0/src/cargo/core/package_id.rs 2017-03-03 19:21:26.000000000 +0000 +++ cargo-0.19.0/src/cargo/core/package_id.rs 2017-05-16 03:23:10.000000000 +0000 @@ -5,9 +5,9 @@ use std::hash; use std::sync::Arc; -use regex::Regex; -use rustc_serialize::{Encodable, Encoder, Decodable, Decoder}; use semver; +use serde::de; +use serde::ser; use util::{CargoResult, CargoError, ToSemver}; use core::source::SourceId; @@ -25,32 +25,41 @@ source_id: SourceId, } -impl Encodable for PackageId { - fn encode(&self, s: &mut S) -> Result<(), S::Error> { +impl ser::Serialize for PackageId { + fn serialize(&self, s: S) -> Result + where S: ser::Serializer + { let source = self.inner.source_id.to_url(); let encoded = format!("{} {} ({})", self.inner.name, self.inner.version, source); - encoded.encode(s) + encoded.serialize(s) } } -impl Decodable for PackageId { - fn decode(d: &mut D) -> Result { - let string: String = Decodable::decode(d)?; - let regex = Regex::new(r"^([^ ]+) ([^ ]+) \(([^\)]+)\)$").unwrap(); - let captures = regex.captures(&string).ok_or_else(|| { - d.error("invalid serialized PackageId") - })?; - - let name = 
captures.at(1).unwrap(); - let version = captures.at(2).unwrap(); - let url = captures.at(3).unwrap(); - let version = semver::Version::parse(version).map_err(|_| { - d.error("invalid version") - })?; - let source_id = SourceId::from_url(url).map_err(|e| { - d.error(&e.to_string()) - })?; +impl de::Deserialize for PackageId { + fn deserialize(d: D) -> Result + where D: de::Deserializer + { + let string = String::deserialize(d)?; + let mut s = string.splitn(3, ' '); + let name = s.next().unwrap(); + let version = match s.next() { + Some(s) => s, + None => return Err(de::Error::custom("invalid serialized PackageId")), + }; + let version = semver::Version::parse(version) + .map_err(de::Error::custom)?; + let url = match s.next() { + Some(s) => s, + None => return Err(de::Error::custom("invalid serialized PackageId")), + }; + let url = if url.starts_with("(") && url.ends_with(")") { + &url[1..url.len() - 1] + } else { + return Err(de::Error::custom("invalid serialized PackageId")) + + }; + let source_id = SourceId::from_url(url).map_err(de::Error::custom)?; Ok(PackageId { inner: Arc::new(PackageIdInner { diff -Nru cargo-0.17.0/src/cargo/core/package.rs cargo-0.19.0/src/cargo/core/package.rs --- cargo-0.17.0/src/cargo/core/package.rs 2017-03-03 19:21:26.000000000 +0000 +++ cargo-0.19.0/src/cargo/core/package.rs 2017-05-16 03:23:10.000000000 +0000 @@ -5,12 +5,12 @@ use std::path::{Path, PathBuf}; use semver::Version; +use serde::ser; use core::{Dependency, Manifest, PackageId, SourceId, Target}; use core::{Summary, SourceMap}; use ops; use util::{CargoResult, Config, LazyCell, ChainError, internal, human, lev_distance}; -use rustc_serialize::{Encoder,Encodable}; /// Information about a package that is available somewhere in the file system. 
/// @@ -24,13 +24,14 @@ manifest_path: PathBuf, } -#[derive(RustcEncodable)] +#[derive(Serialize)] struct SerializedPackage<'a> { name: &'a str, version: &'a str, id: &'a PackageId, license: Option<&'a str>, license_file: Option<&'a str>, + description: Option<&'a str>, source: &'a SourceId, dependencies: &'a [Dependency], targets: &'a [Target], @@ -38,13 +39,16 @@ manifest_path: &'a str, } -impl Encodable for Package { - fn encode(&self, s: &mut S) -> Result<(), S::Error> { +impl ser::Serialize for Package { + fn serialize(&self, s: S) -> Result + where S: ser::Serializer, + { let summary = self.manifest.summary(); let package_id = summary.package_id(); let manmeta = self.manifest.metadata(); let license = manmeta.license.as_ref().map(String::as_ref); let license_file = manmeta.license_file.as_ref().map(String::as_ref); + let description = manmeta.description.as_ref().map(String::as_ref); SerializedPackage { name: &package_id.name(), @@ -52,12 +56,13 @@ id: package_id, license: license, license_file: license_file, + description: description, source: summary.source_id(), dependencies: summary.dependencies(), targets: &self.manifest.targets(), features: summary.features(), manifest_path: &self.manifest_path.display().to_string(), - }.encode(s) + }.serialize(s) } } diff -Nru cargo-0.17.0/src/cargo/core/resolver/encode.rs cargo-0.19.0/src/cargo/core/resolver/encode.rs --- cargo-0.17.0/src/cargo/core/resolver/encode.rs 2017-03-03 19:21:26.000000000 +0000 +++ cargo-0.19.0/src/cargo/core/resolver/encode.rs 2017-05-16 03:23:10.000000000 +0000 @@ -2,15 +2,15 @@ use std::fmt; use std::str::FromStr; -use regex::Regex; -use rustc_serialize::{Encodable, Encoder, Decodable, Decoder}; +use serde::ser; +use serde::de; use core::{Package, PackageId, SourceId, Workspace}; use util::{CargoResult, Graph, Config, internal, ChainError, CargoError}; use super::Resolve; -#[derive(RustcEncodable, RustcDecodable, Debug)] +#[derive(Serialize, Deserialize, Debug)] pub struct EncodableResolve 
{ package: Option>, /// `root` is optional to allow forward compatibility. @@ -154,6 +154,7 @@ Ok(Resolve { graph: g, + empty_features: HashSet::new(), features: HashMap::new(), replacements: replacements, checksums: checksums, @@ -207,7 +208,7 @@ } } -#[derive(RustcEncodable, RustcDecodable, Debug, PartialOrd, Ord, PartialEq, Eq)] +#[derive(Serialize, Deserialize, Debug, PartialOrd, Ord, PartialEq, Eq)] pub struct EncodableDependency { name: String, version: String, @@ -237,16 +238,19 @@ type Err = Box; fn from_str(s: &str) -> CargoResult { - let regex = Regex::new(r"^([^ ]+) ([^ ]+)(?: \(([^\)]+)\))?$").unwrap(); - let captures = regex.captures(s).ok_or_else(|| { + let mut s = s.splitn(3, ' '); + let name = s.next().unwrap(); + let version = s.next().chain_error(|| { internal("invalid serialized PackageId") })?; - - let name = captures.at(1).unwrap(); - let version = captures.at(2).unwrap(); - - let source_id = match captures.at(3) { - Some(s) => Some(SourceId::from_url(s)?), + let source_id = match s.next() { + Some(s) => { + if s.starts_with("(") && s.ends_with(")") { + Some(SourceId::from_url(&s[1..s.len() - 1])?) 
+ } else { + bail!("invalid serialized PackageId") + } + } None => None, }; @@ -258,17 +262,21 @@ } } -impl Encodable for EncodablePackageId { - fn encode(&self, s: &mut S) -> Result<(), S::Error> { - self.to_string().encode(s) +impl ser::Serialize for EncodablePackageId { + fn serialize(&self, s: S) -> Result + where S: ser::Serializer, + { + self.to_string().serialize(s) } } -impl Decodable for EncodablePackageId { - fn decode(d: &mut D) -> Result { - String::decode(d).and_then(|string| { +impl de::Deserialize for EncodablePackageId { + fn deserialize(d: D) -> Result + where D: de::Deserializer, + { + String::deserialize(d).and_then(|string| { string.parse::() - .map_err(|e| d.error(&e.to_string())) + .map_err(de::Error::custom) }) } } @@ -279,8 +287,10 @@ pub use_root_key: bool, } -impl<'a, 'cfg> Encodable for WorkspaceResolve<'a, 'cfg> { - fn encode(&self, s: &mut S) -> Result<(), S::Error> { +impl<'a, 'cfg> ser::Serialize for WorkspaceResolve<'a, 'cfg> { + fn serialize(&self, s: S) -> Result + where S: ser::Serializer, + { let mut ids: Vec<&PackageId> = self.resolve.graph.iter().collect(); ids.sort(); @@ -318,7 +328,7 @@ package: Some(encodable), root: root, metadata: metadata, - }.encode(s) + }.serialize(s) } } diff -Nru cargo-0.17.0/src/cargo/core/resolver/mod.rs cargo-0.19.0/src/cargo/core/resolver/mod.rs --- cargo-0.17.0/src/cargo/core/resolver/mod.rs 2017-03-03 19:21:26.000000000 +0000 +++ cargo-0.19.0/src/cargo/core/resolver/mod.rs 2017-05-16 03:23:10.000000000 +0000 @@ -47,6 +47,7 @@ use std::cmp::Ordering; use std::collections::{HashSet, HashMap, BinaryHeap, BTreeMap}; +use std::iter::FromIterator; use std::fmt; use std::ops::Range; use std::rc::Rc; @@ -74,6 +75,7 @@ pub struct Resolve { graph: Graph, replacements: HashMap, + empty_features: HashSet, features: HashMap>, checksums: HashMap>, metadata: Metadata, @@ -210,8 +212,14 @@ &self.replacements } - pub fn features(&self, pkg: &PackageId) -> Option<&HashSet> { - self.features.get(pkg) + pub fn 
features(&self, pkg: &PackageId) -> &HashSet { + self.features.get(pkg).unwrap_or(&self.empty_features) + } + + pub fn features_sorted(&self, pkg: &PackageId) -> Vec<&str> { + let mut v = Vec::from_iter(self.features(pkg).iter().map(|s| s.as_ref())); + v.sort(); + v } pub fn query(&self, spec: &str) -> CargoResult<&PackageId> { @@ -273,6 +281,7 @@ let mut resolve = Resolve { graph: cx.resolve_graph, + empty_features: HashSet::new(), features: cx.resolve_features, checksums: HashMap::new(), metadata: BTreeMap::new(), @@ -307,15 +316,13 @@ candidate.summary.package_id().clone()); } - if cx.flag_activated(&candidate.summary, method) { - return Ok(None); - } + let activated = cx.flag_activated(&candidate.summary, method); let candidate = match candidate.replace { Some(replace) => { cx.resolve_replacements.insert(candidate.summary.package_id().clone(), replace.package_id().clone()); - if cx.flag_activated(&replace, method) { + if cx.flag_activated(&replace, method) && activated { return Ok(None); } trace!("activating {} (replacing {})", replace.package_id(), @@ -323,6 +330,9 @@ replace } None => { + if activated { + return Ok(None) + } trace!("activating {}", candidate.summary.package_id()); candidate.summary } @@ -633,14 +643,6 @@ // Note that we re-query the registry with a new dependency that // allows any version so we can give some nicer error reporting // which indicates a few versions that were actually found. 
- let msg = format!("no matching package named `{}` found \ - (required by `{}`)\n\ - location searched: {}\n\ - version required: {}", - dep.name(), parent.name(), - dep.source_id(), - dep.version_req()); - let mut msg = msg; let all_req = semver::VersionReq::parse("*").unwrap(); let new_dep = dep.clone_inner().set_version_req(all_req).into_dependency(); let mut candidates = match registry.query(&new_dep) { @@ -650,27 +652,50 @@ candidates.sort_by(|a, b| { b.version().cmp(a.version()) }); - if !candidates.is_empty() { - msg.push_str("\nversions found: "); - for (i, c) in candidates.iter().take(3).enumerate() { - if i != 0 { msg.push_str(", "); } - msg.push_str(&c.version().to_string()); - } - if candidates.len() > 3 { - msg.push_str(", ..."); - } - } - // If we have a path dependency with a locked version, then this may - // indicate that we updated a sub-package and forgot to run `cargo - // update`. In this case try to print a helpful error! - if dep.source_id().is_path() && - dep.version_req().to_string().starts_with("=") && - !candidates.is_empty() { - msg.push_str("\nconsider running `cargo update` to update \ - a path dependency's locked version"); + let msg = if !candidates.is_empty() { + let versions = { + let mut versions = candidates.iter().take(3).map(|cand| { + cand.version().to_string() + }).collect::>(); + + if candidates.len() > 3 { + versions.push("...".into()); + } + + versions.join(", ") + }; + + let mut msg = format!("no matching version `{}` found for package `{}` \ + (required by `{}`)\n\ + location searched: {}\n\ + versions found: {}", + dep.version_req(), + dep.name(), + parent.name(), + dep.source_id(), + versions); + + // If we have a path dependency with a locked version, then this may + // indicate that we updated a sub-package and forgot to run `cargo + // update`. In this case try to print a helpful error! 
+ if dep.source_id().is_path() + && dep.version_req().to_string().starts_with("=") { + msg.push_str("\nconsider running `cargo update` to update \ + a path dependency's locked version"); + } + + msg + } else { + format!("no matching package named `{}` found \ + (required by `{}`)\n\ + location searched: {}\n\ + version required: {}", + dep.name(), parent.name(), + dep.source_id(), + dep.version_req()) + }; - } human(msg) } @@ -1013,13 +1038,17 @@ // dependencies. if checked.insert(id) { let summary = summaries[id]; - for dep in resolve.deps(id) { + for dep in resolve.deps_not_replaced(id) { let is_transitive = summary.dependencies().iter().any(|d| { d.matches_id(dep) && d.is_transitive() }); let mut empty = HashSet::new(); let visited = if is_transitive {&mut *visited} else {&mut empty}; visit(resolve, dep, summaries, visited, checked)?; + + if let Some(id) = resolve.replacement(dep) { + visit(resolve, id, summaries, visited, checked)?; + } } } diff -Nru cargo-0.17.0/src/cargo/core/source.rs cargo-0.19.0/src/cargo/core/source.rs --- cargo-0.17.0/src/cargo/core/source.rs 2017-03-03 19:21:26.000000000 +0000 +++ cargo-0.19.0/src/cargo/core/source.rs 2017-05-16 03:23:10.000000000 +0000 @@ -7,7 +7,8 @@ use std::sync::atomic::{AtomicBool, ATOMIC_BOOL_INIT}; use std::sync::atomic::Ordering::SeqCst; -use rustc_serialize::{Decodable, Decoder, Encodable, Encoder}; +use serde::ser; +use serde::de; use url::Url; use core::{Package, PackageId, Registry}; @@ -342,22 +343,24 @@ } } -impl Encodable for SourceId { - fn encode(&self, s: &mut S) -> Result<(), S::Error> { +impl ser::Serialize for SourceId { + fn serialize(&self, s: S) -> Result + where S: ser::Serializer, + { if self.is_path() { - s.emit_option_none() + None::.serialize(s) } else { - self.to_url().encode(s) + Some(self.to_url()).serialize(s) } } } -impl Decodable for SourceId { - fn decode(d: &mut D) -> Result { - let string: String = Decodable::decode(d)?; - SourceId::from_url(&string).map_err(|e| { - 
d.error(&e.to_string()) - }) +impl de::Deserialize for SourceId { + fn deserialize(d: D) -> Result + where D: de::Deserializer, + { + let string = String::deserialize(d)?; + SourceId::from_url(&string).map_err(de::Error::custom) } } diff -Nru cargo-0.17.0/src/cargo/core/workspace.rs cargo-0.19.0/src/cargo/core/workspace.rs --- cargo-0.17.0/src/cargo/core/workspace.rs 2017-03-03 19:21:26.000000000 +0000 +++ cargo-0.19.0/src/cargo/core/workspace.rs 2017-05-16 03:23:10.000000000 +0000 @@ -44,6 +44,11 @@ // True, if this is a temporary workspace created for the purposes of // cargo install or cargo package. is_ephemeral: bool, + + // True if this workspace should enforce optional dependencies even when + // not needed; false if this workspace should only enforce dependencies + // needed by the current configuration (such as in cargo install). + require_optional_deps: bool, } // Separate structure for tracking loaded packages (to avoid loading anything @@ -63,7 +68,10 @@ pub enum WorkspaceConfig { /// Indicates that `[workspace]` was present and the members were /// optionally specified as well. - Root { members: Option> }, + Root { + members: Option>, + exclude: Vec, + }, /// Indicates that `[workspace]` was present and the `root` field is the /// optional value of `package.workspace`, if present. @@ -99,6 +107,7 @@ target_dir: target_dir, members: Vec::new(), is_ephemeral: false, + require_optional_deps: true, }; ws.root_manifest = ws.find_root(manifest_path)?; ws.find_members()?; @@ -115,8 +124,8 @@ /// /// This is currently only used in niche situations like `cargo install` or /// `cargo package`. 
- pub fn ephemeral(package: Package, config: &'cfg Config, target_dir: Option) - -> CargoResult> { + pub fn ephemeral(package: Package, config: &'cfg Config, target_dir: Option, + require_optional_deps: bool) -> CargoResult> { let mut ws = Workspace { config: config, current_manifest: package.manifest_path().to_path_buf(), @@ -128,6 +137,7 @@ target_dir: None, members: Vec::new(), is_ephemeral: true, + require_optional_deps: require_optional_deps, }; { let key = ws.current_manifest.parent().unwrap(); @@ -219,6 +229,10 @@ self.is_ephemeral } + pub fn require_optional_deps(&self) -> bool { + self.require_optional_deps + } + /// Finds the root of a workspace for the crate whose manifest is located /// at `manifest_path`. /// @@ -230,6 +244,14 @@ /// if some other transient error happens. fn find_root(&mut self, manifest_path: &Path) -> CargoResult> { + fn read_root_pointer(member_manifest: &Path, root_link: &str) -> CargoResult { + let path = member_manifest.parent().unwrap() + .join(root_link) + .join("Cargo.toml"); + debug!("find_root - pointer {}", path.display()); + return Ok(paths::normalize_path(&path)) + }; + { let current = self.packages.load(&manifest_path)?; match *current.workspace_config() { @@ -238,30 +260,31 @@ return Ok(Some(manifest_path.to_path_buf())) } WorkspaceConfig::Member { root: Some(ref path_to_root) } => { - let path = manifest_path.parent().unwrap() - .join(path_to_root) - .join("Cargo.toml"); - debug!("find_root - pointer {}", path.display()); - return Ok(Some(paths::normalize_path(&path))) + return Ok(Some(read_root_pointer(manifest_path, path_to_root)?)) } WorkspaceConfig::Member { root: None } => {} } } - let mut cur = manifest_path.parent().and_then(|p| p.parent()); - while let Some(path) = cur { + for path in paths::ancestors(manifest_path).skip(2) { let manifest = path.join("Cargo.toml"); debug!("find_root - trying {}", manifest.display()); if manifest.exists() { match *self.packages.load(&manifest)?.workspace_config() { - 
WorkspaceConfig::Root { .. } => { - debug!("find_root - found"); - return Ok(Some(manifest)) + WorkspaceConfig::Root { ref exclude, ref members } => { + debug!("find_root - found a root checking exclusion"); + if !is_excluded(members, exclude, path, manifest_path) { + debug!("find_root - found!"); + return Ok(Some(manifest)) + } + } + WorkspaceConfig::Member { root: Some(ref path_to_root) } => { + debug!("find_root - found pointer"); + return Ok(Some(read_root_pointer(&manifest, path_to_root)?)) } WorkspaceConfig::Member { .. } => {} } } - cur = path.parent(); } Ok(None) @@ -286,7 +309,7 @@ let members = { let root = self.packages.load(&root_manifest)?; match *root.workspace_config() { - WorkspaceConfig::Root { ref members } => members.clone(), + WorkspaceConfig::Root { ref members, .. } => members.clone(), _ => bail!("root of a workspace inferred but wasn't a root: {}", root_manifest.display()), } @@ -296,14 +319,17 @@ for path in list { let root = root_manifest.parent().unwrap(); let manifest_path = root.join(path).join("Cargo.toml"); - self.find_path_deps(&manifest_path, false)?; + self.find_path_deps(&manifest_path, &root_manifest, false)?; } } - self.find_path_deps(&root_manifest, false) + self.find_path_deps(&root_manifest, &root_manifest, false) } - fn find_path_deps(&mut self, manifest_path: &Path, is_path_dep: bool) -> CargoResult<()> { + fn find_path_deps(&mut self, + manifest_path: &Path, + root_manifest: &Path, + is_path_dep: bool) -> CargoResult<()> { let manifest_path = paths::normalize_path(manifest_path); if self.members.iter().any(|p| p == &manifest_path) { return Ok(()) @@ -316,6 +342,16 @@ return Ok(()) } + let root = root_manifest.parent().unwrap(); + match *self.packages.load(root_manifest)?.workspace_config() { + WorkspaceConfig::Root { ref members, ref exclude } => { + if is_excluded(members, exclude, root, &manifest_path) { + return Ok(()) + } + } + _ => {} + } + debug!("find_members - {}", manifest_path.display()); 
self.members.push(manifest_path.clone()); @@ -333,7 +369,7 @@ .collect::>() }; for candidate in candidates { - self.find_path_deps(&candidate, true)?; + self.find_path_deps(&candidate, root_manifest, true)?; } Ok(()) } @@ -438,7 +474,7 @@ MaybePackage::Virtual(_) => members_msg, MaybePackage::Package(ref p) => { let members = match *p.manifest().workspace_config() { - WorkspaceConfig::Root { ref members } => members, + WorkspaceConfig::Root { ref members, .. } => members, WorkspaceConfig::Member { .. } => unreachable!(), }; if members.is_none() { @@ -491,6 +527,27 @@ } } +fn is_excluded(members: &Option>, + exclude: &[String], + root_path: &Path, + manifest_path: &Path) -> bool { + let excluded = exclude.iter().any(|ex| { + manifest_path.starts_with(root_path.join(ex)) + }); + + let explicit_member = match *members { + Some(ref members) => { + members.iter().any(|mem| { + manifest_path.starts_with(root_path.join(mem)) + }) + } + None => false, + }; + + !explicit_member && excluded +} + + impl<'cfg> Packages<'cfg> { fn get(&self, manifest_path: &Path) -> &MaybePackage { &self.packages[manifest_path.parent().unwrap()] diff -Nru cargo-0.17.0/src/cargo/lib.rs cargo-0.19.0/src/cargo/lib.rs --- cargo-0.17.0/src/cargo/lib.rs 2017-03-03 19:21:26.000000000 +0000 +++ cargo-0.19.0/src/cargo/lib.rs 2017-05-16 03:23:10.000000000 +0000 @@ -3,6 +3,9 @@ #[cfg(test)] extern crate hamcrest; #[macro_use] extern crate log; +#[macro_use] extern crate serde_derive; +#[macro_use] extern crate serde_json; +extern crate chrono; extern crate crates_io as registry; extern crate crossbeam; extern crate curl; @@ -15,9 +18,10 @@ extern crate libc; extern crate libgit2_sys; extern crate num_cpus; -extern crate regex; extern crate rustc_serialize; extern crate semver; +extern crate serde; +extern crate serde_ignored; extern crate shell_escape; extern crate tar; extern crate tempdir; @@ -27,8 +31,8 @@ use std::io; use std::fmt; -use rustc_serialize::{Decodable, Encodable}; -use 
rustc_serialize::json; +use rustc_serialize::Decodable; +use serde::ser; use docopt::Docopt; use core::{Shell, MultiShell, ShellConfig, Verbosity, ColorConfig}; @@ -37,6 +41,8 @@ pub use util::{CargoError, CargoResult, CliError, CliResult, human, Config, ChainError}; +pub const CARGO_ENV: &'static str = "CARGO"; + macro_rules! bail { ($($fmt:tt)*) => ( return Err(::util::human(&format_args!($($fmt)*))) @@ -57,8 +63,6 @@ pub struct CfgInfo { // Information about the git repository we may have been built from. pub commit_info: Option, - // The date that the build was performed. - pub build_date: String, // The release channel we were built for. pub release_channel: String, } @@ -75,29 +79,20 @@ impl fmt::Display for VersionInfo { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "cargo-{}.{}.{}", + write!(f, "cargo {}.{}.{}", self.major, self.minor, self.patch)?; - match self.cfg_info.as_ref().map(|ci| &ci.release_channel) { - Some(channel) => { - if channel != "stable" { - write!(f, "-{}", channel)?; - let empty = String::from(""); - write!(f, "{}", self.pre_release.as_ref().unwrap_or(&empty))?; - } - }, - None => (), + if let Some(channel) = self.cfg_info.as_ref().map(|ci| &ci.release_channel) { + if channel != "stable" { + write!(f, "-{}", channel)?; + let empty = String::from(""); + write!(f, "{}", self.pre_release.as_ref().unwrap_or(&empty))?; + } }; if let Some(ref cfg) = self.cfg_info { - match cfg.commit_info { - Some(ref ci) => { - write!(f, " ({} {})", - ci.short_commit_hash, ci.commit_date)?; - }, - None => { - write!(f, " (built {})", - cfg.build_date)?; - } + if let Some(ref ci) = cfg.commit_info { + write!(f, " ({} {})", + ci.short_commit_hash, ci.commit_date)?; } }; Ok(()) @@ -124,17 +119,8 @@ exec(flags, config) } -// This will diverge if `result` is an `Err` and return otherwise. 
-pub fn process_executed(result: CliResult, shell: &mut MultiShell) -{ - match result { - Err(e) => handle_cli_error(e, shell), - Ok(()) => {} - } -} - -pub fn print_json(obj: &T) { - let encoded = json::encode(&obj).unwrap(); +pub fn print_json(obj: &T) { + let encoded = serde_json::to_string(&obj).unwrap(); println!("{}", encoded); } @@ -183,8 +169,8 @@ } } -pub fn handle_cli_error(err: CliError, shell: &mut MultiShell) -> ! { - debug!("handle_cli_error; err={:?}", err); +pub fn exit_with_error(err: CliError, shell: &mut MultiShell) -> ! { + debug!("exit_with_error; err={:?}", err); let CliError { error, exit_code, unknown } = err; // exit_code == 0 is non-fatal error, e.g. docopt version info @@ -260,12 +246,11 @@ } }); VersionInfo { - major: option_env_str!("CFG_VERSION_MAJOR").unwrap(), - minor: option_env_str!("CFG_VERSION_MINOR").unwrap(), - patch: option_env_str!("CFG_VERSION_PATCH").unwrap(), - pre_release: option_env_str!("CFG_PRERELEASE_VERSION"), + major: env_str!("CARGO_PKG_VERSION_MAJOR"), + minor: env_str!("CARGO_PKG_VERSION_MINOR"), + patch: env_str!("CARGO_PKG_VERSION_PATCH"), + pre_release: option_env_str!("CARGO_PKG_VERSION_PRE"), cfg_info: Some(CfgInfo { - build_date: option_env_str!("CFG_BUILD_DATE").unwrap(), release_channel: option_env_str!("CFG_RELEASE_CHANNEL").unwrap(), commit_info: commit_info, }), diff -Nru cargo-0.17.0/src/cargo/ops/cargo_compile.rs cargo-0.19.0/src/cargo/ops/cargo_compile.rs --- cargo-0.17.0/src/cargo/ops/cargo_compile.rs 2017-03-03 19:21:26.000000000 +0000 +++ cargo-0.19.0/src/cargo/ops/cargo_compile.rs 2017-05-16 03:23:10.000000000 +0000 @@ -22,12 +22,14 @@ //! previously compiled dependency //! 
-use std::collections::HashMap; +use std::collections::{HashMap, HashSet}; +use std::default::Default; use std::path::PathBuf; use std::sync::Arc; use core::{Source, Package, Target}; -use core::{Profile, TargetKind, Profiles, Workspace, PackageIdSpec}; +use core::{Profile, TargetKind, Profiles, Workspace, PackageId, PackageIdSpec}; +use core::resolver::Resolve; use ops::{self, BuildOutput, Executor, DefaultExecutor}; use util::config::Config; use util::{CargoResult, profile}; @@ -100,7 +102,7 @@ Json } -#[derive(Clone, Copy, PartialEq, Eq)] +#[derive(Clone, Copy, PartialEq, Eq, Debug)] pub enum Packages<'a> { All, Packages(&'a [String]), @@ -124,7 +126,10 @@ } pub enum CompileFilter<'a> { - Everything, + Everything { + /// Flag whether targets can be safely skipped when required-features are not satisfied. + required_features_filterable: bool, + }, Only { lib: bool, bins: &'a [String], @@ -140,7 +145,7 @@ } pub fn compile_with_exec<'a>(ws: &Workspace<'a>, - options: &CompileOptions<'a>, + options: &CompileOptions<'a>, exec: Arc) -> CargoResult> { for member in ws.members() { @@ -187,7 +192,9 @@ } } else { let root_package = ws.current()?; - generate_targets(root_package, profiles, mode, filter, release)?; + let all_features = resolve_all_features(&resolve_with_overrides, + root_package.package_id()); + generate_targets(root_package, profiles, mode, filter, &all_features, release)?; pkgids.push(root_package.package_id()); }; @@ -204,8 +211,10 @@ panic!("`rustc` and `rustdoc` should not accept multiple `-p` flags") } (Some(args), _) => { + let all_features = resolve_all_features(&resolve_with_overrides, + to_builds[0].package_id()); let targets = generate_targets(to_builds[0], profiles, - mode, filter, release)?; + mode, filter, &all_features, release)?; if targets.len() == 1 { let (target, profile) = targets[0]; let mut profile = profile.clone(); @@ -218,8 +227,10 @@ } } (None, Some(args)) => { + let all_features = resolve_all_features(&resolve_with_overrides, + 
to_builds[0].package_id()); let targets = generate_targets(to_builds[0], profiles, - mode, filter, release)?; + mode, filter, &all_features, release)?; if targets.len() == 1 { let (target, profile) = targets[0]; let mut profile = profile.clone(); @@ -233,8 +244,10 @@ } (None, None) => { for &to_build in to_builds.iter() { + let all_features = resolve_all_features(&resolve_with_overrides, + to_build.package_id()); let targets = generate_targets(to_build, profiles, mode, - filter, release)?; + filter, &all_features, release)?; package_targets.push((to_build, targets)); } } @@ -268,7 +281,24 @@ ret.to_doc_test = to_builds.iter().map(|&p| p.clone()).collect(); - Ok(ret) + return Ok(ret); + + fn resolve_all_features(resolve_with_overrides: &Resolve, + package_id: &PackageId) + -> HashSet { + let mut features = resolve_with_overrides.features(package_id).clone(); + + // Include features enabled for use by dependencies so targets can also use them with the + // required-features field when deciding whether to be built or skipped. + let deps = resolve_with_overrides.deps(package_id); + for dep in deps { + for feature in resolve_with_overrides.features(dep) { + features.insert(dep.name().to_string() + "/" + feature); + } + } + + features + } } impl<'a> CompileFilter<'a> { @@ -284,13 +314,15 @@ tests: tests, } } else { - CompileFilter::Everything + CompileFilter::Everything { + required_features_filterable: true, + } } } pub fn matches(&self, target: &Target) -> bool { match *self { - CompileFilter::Everything => true, + CompileFilter::Everything { .. } => true, CompileFilter::Only { lib, bins, examples, tests, benches } => { let list = match *target.kind() { TargetKind::Bin => bins, @@ -313,6 +345,7 @@ profiles: &'a Profiles, mode: CompileMode, filter: &CompileFilter, + features: &HashSet, release: bool) -> CargoResult> { let build = if release {&profiles.release} else {&profiles.dev}; @@ -325,13 +358,13 @@ CompileMode::Doc { .. 
} => &profiles.doc, CompileMode::Doctest => &profiles.doctest, }; - match *filter { - CompileFilter::Everything => { + let mut targets = match *filter { + CompileFilter::Everything { .. } => { match mode { CompileMode::Bench => { - Ok(pkg.targets().iter().filter(|t| t.benched()).map(|t| { + pkg.targets().iter().filter(|t| t.benched()).map(|t| { (t, profile) - }).collect::>()) + }).collect::>() } CompileMode::Test => { let deps = if release { @@ -352,16 +385,16 @@ base.push((t, deps)); } } - Ok(base) + base } CompileMode::Build | CompileMode::Check => { - Ok(pkg.targets().iter().filter(|t| { + pkg.targets().iter().filter(|t| { t.is_bin() || t.is_lib() - }).map(|t| (t, profile)).collect()) + }).map(|t| (t, profile)).collect() } CompileMode::Doc { .. } => { - Ok(pkg.targets().iter().filter(|t| t.documented()) - .map(|t| (t, profile)).collect()) + pkg.targets().iter().filter(|t| t.documented()) + .map(|t| (t, profile)).collect() } CompileMode::Doctest => { if let Some(t) = pkg.targets().iter().find(|t| t.is_lib()) { @@ -370,7 +403,7 @@ } } - Ok(Vec::new()) + Vec::new() } } } @@ -409,6 +442,7 @@ } }; debug!("found {} `{}`", desc, name); + targets.push((t, profile)); } Ok(()) @@ -418,9 +452,39 @@ find(tests, "test", Target::is_test, test)?; find(benches, "bench", Target::is_bench, &profiles.bench)?; } - Ok(targets) + targets + } + }; + + //Collect the targets that are libraries or have all required features available. + let mut compatible_targets = Vec::with_capacity(targets.len()); + for (target, profile) in targets.drain(0..) { + if target.is_lib() || match target.required_features() { + Some(f) => f.iter().all(|f| features.contains(f)), + None => true, + } { + compatible_targets.push((target, profile)); + continue; + } + + if match *filter { + CompileFilter::Everything { required_features_filterable } => + !required_features_filterable, + CompileFilter::Only { .. 
} => true, + } { + let required_features = target.required_features().unwrap(); + let quoted_required_features: Vec = required_features.iter() + .map(|s| format!("`{}`",s)) + .collect(); + bail!("target `{}` requires the features: {}\n\ + Consider enabling them by passing e.g. `--features=\"{}\"`", + target.name(), + quoted_required_features.join(", "), + required_features.join(" ")); } } + + Ok(compatible_targets) } /// Parse all config files to learn about build configuration. Currently @@ -492,7 +556,13 @@ rerun_if_changed: Vec::new(), warnings: Vec::new(), }; + // We require deterministic order of evaluation, so we must sort the pairs by key first. + let mut pairs = Vec::new(); for (k, value) in value.table(&lib_name)?.0 { + pairs.push((k,value)); + } + pairs.sort_by_key( |p| p.0 ); + for (k,value) in pairs{ let key = format!("{}.{}", key, k); match &k[..] { "rustc-flags" => { diff -Nru cargo-0.17.0/src/cargo/ops/cargo_generate_lockfile.rs cargo-0.19.0/src/cargo/ops/cargo_generate_lockfile.rs --- cargo-0.17.0/src/cargo/ops/cargo_generate_lockfile.rs 2017-03-03 19:21:26.000000000 +0000 +++ cargo-0.19.0/src/cargo/ops/cargo_generate_lockfile.rs 2017-05-16 03:23:10.000000000 +0000 @@ -146,7 +146,7 @@ // we'll let it through. 
// // Note that we only check this for non-registry sources, - // however, as registries countain enough version information in + // however, as registries contain enough version information in // the package id to disambiguate if a.source_id().is_registry() { return false diff -Nru cargo-0.17.0/src/cargo/ops/cargo_install.rs cargo-0.19.0/src/cargo/ops/cargo_install.rs --- cargo-0.17.0/src/cargo/ops/cargo_install.rs 2017-03-03 19:21:26.000000000 +0000 +++ cargo-0.19.0/src/cargo/ops/cargo_install.rs 2017-05-16 03:23:10.000000000 +0000 @@ -19,13 +19,18 @@ use util::{CargoResult, ChainError, Config, human, internal}; use util::{Filesystem, FileLock}; -#[derive(RustcDecodable, RustcEncodable)] +#[derive(Deserialize, Serialize)] +#[serde(untagged)] enum CrateListing { V1(CrateListingV1), - Empty, + Empty(Empty), } -#[derive(RustcDecodable, RustcEncodable)] +#[derive(Deserialize, Serialize)] +#[serde(deny_unknown_fields)] +struct Empty {} + +#[derive(Deserialize, Serialize)] struct CrateListingV1 { v1: BTreeMap>, } @@ -92,7 +97,7 @@ }; let ws = match overidden_target_dir { - Some(dir) => Workspace::ephemeral(pkg, config, Some(dir))?, + Some(dir) => Workspace::ephemeral(pkg, config, Some(dir), false)?, None => Workspace::new(pkg.manifest_path(), config)?, }; let pkg = ws.current()?; @@ -129,6 +134,10 @@ bail!("Binary `{:?}` name can't be serialized into string", name) } }).collect::>()?; + if binaries.is_empty() { + bail!("no binaries are available for install using the selected \ + features"); + } let metadata = metadata(config, &root)?; let mut list = read_crate_list(metadata.file())?; @@ -349,7 +358,7 @@ filter: &ops::CompileFilter, prev: &CrateListingV1, force: bool) -> CargoResult>> { - if let CompileFilter::Everything = *filter { + if let CompileFilter::Everything { .. 
} = *filter { // If explicit --bin or --example flags were passed then those'll // get checked during cargo_compile, we only care about the "build // everything" case here @@ -390,7 +399,7 @@ } }; match *filter { - CompileFilter::Everything => { + CompileFilter::Everything { .. } => { pkg.targets().iter() .filter(|t| t.is_bin()) .filter_map(|t| check(t.name())) @@ -408,12 +417,12 @@ (|| -> CargoResult<_> { let mut contents = String::new(); file.read_to_string(&mut contents)?; - let listing = toml::decode_str(&contents).chain_error(|| { + let listing = toml::from_str(&contents).chain_error(|| { internal("invalid TOML found for metadata") })?; match listing { CrateListing::V1(v1) => Ok(v1), - CrateListing::Empty => { + CrateListing::Empty(_) => { Ok(CrateListingV1 { v1: BTreeMap::new() }) } } @@ -426,7 +435,7 @@ (|| -> CargoResult<_> { file.seek(SeekFrom::Start(0))?; file.set_len(0)?; - let data = toml::encode_str::(&CrateListing::V1(listing)); + let data = toml::to_string(&CrateListing::V1(listing))?; file.write_all(data.as_bytes())?; Ok(()) }).chain_error(|| { diff -Nru cargo-0.17.0/src/cargo/ops/cargo_new.rs cargo-0.19.0/src/cargo/ops/cargo_new.rs --- cargo-0.17.0/src/cargo/ops/cargo_new.rs 2017-03-03 19:21:26.000000000 +0000 +++ cargo-0.19.0/src/cargo/ops/cargo_new.rs 2017-05-16 03:23:10.000000000 +0000 @@ -10,13 +10,13 @@ use term::color::BLACK; use core::Workspace; -use util::{GitRepo, HgRepo, CargoResult, human, ChainError, internal}; +use util::{GitRepo, HgRepo, PijulRepo, CargoResult, human, ChainError, internal}; use util::{Config, paths}; use toml; #[derive(Clone, Copy, Debug, PartialEq)] -pub enum VersionControl { Git, Hg, NoVcs } +pub enum VersionControl { Git, Hg, Pijul, NoVcs } pub struct NewOptions<'a> { pub version_control: Option, @@ -45,6 +45,7 @@ Ok(match &d.read_str()?[..] 
{ "git" => VersionControl::Git, "hg" => VersionControl::Hg, + "pijul" => VersionControl::Pijul, "none" => VersionControl::NoVcs, n => { let err = format!("could not decode '{}' as version control", n); @@ -331,10 +332,15 @@ num_detected_vsces += 1; } + if fs::metadata(&path.join(".pijul")).is_ok() { + version_control = Some(VersionControl::Pijul); + num_detected_vsces += 1; + } + // if none exists, maybe create git, like in `cargo new` if num_detected_vsces > 1 { - bail!("both .git and .hg directories found \ + bail!("more than one of .hg, .git, or .pijul directories found \ and the ignore file can't be \ filled in as a result, \ specify --vcs to override detection"); @@ -377,19 +383,17 @@ let path = opts.path; let name = opts.name; let cfg = global_config(config)?; - let mut ignore = "target\n".to_string(); - let in_existing_vcs_repo = existing_vcs_repo(path.parent().unwrap(), config.cwd()); - if !opts.bin { - ignore.push_str("Cargo.lock\n"); - } + let ignore = ["target/\n", "**/*.rs.bk\n", + if !opts.bin { "Cargo.lock\n" } else { "" }] + .concat(); + let in_existing_vcs_repo = existing_vcs_repo(path.parent().unwrap(), config.cwd()); let vcs = match (opts.version_control, cfg.version_control, in_existing_vcs_repo) { (None, None, false) => VersionControl::Git, (None, Some(option), false) => option, (Some(option), _, _) => option, (_, _, true) => VersionControl::NoVcs, }; - match vcs { VersionControl::Git => { if !fs::metadata(&path.join(".git")).is_ok() { @@ -403,13 +407,18 @@ } paths::append(&path.join(".hgignore"), ignore.as_bytes())?; }, + VersionControl::Pijul => { + if !fs::metadata(&path.join(".pijul")).is_ok() { + PijulRepo::init(path, config.cwd())?; + } + }, VersionControl::NoVcs => { fs::create_dir_all(path)?; }, }; let (author_name, email) = discover_author()?; - // Hoo boy, sure glad we've got exhaustivenes checking behind us. + // Hoo boy, sure glad we've got exhaustiveness checking behind us. 
let author = match (cfg.name, cfg.email, author_name, email) { (Some(name), Some(email), _, _) | (Some(name), None, _, Some(email)) | @@ -421,7 +430,7 @@ let mut cargotoml_path_specifier = String::new(); - // Calculare what [lib] and [[bin]]s do we need to append to Cargo.toml + // Calculate what [lib] and [[bin]]s do we need to append to Cargo.toml for i in &opts.source_files { if i.bin { diff -Nru cargo-0.17.0/src/cargo/ops/cargo_output_metadata.rs cargo-0.19.0/src/cargo/ops/cargo_output_metadata.rs --- cargo-0.17.0/src/cargo/ops/cargo_output_metadata.rs 2017-03-03 19:21:26.000000000 +0000 +++ cargo-0.19.0/src/cargo/ops/cargo_output_metadata.rs 2017-05-16 03:23:10.000000000 +0000 @@ -1,4 +1,4 @@ -use rustc_serialize::{Encodable, Encoder}; +use serde::ser::{self, Serialize}; use core::resolver::Resolve; use core::{Package, PackageId, Workspace}; @@ -67,7 +67,7 @@ }) } -#[derive(RustcEncodable)] +#[derive(Serialize)] pub struct ExportInfo { packages: Vec, workspace_members: Vec, @@ -75,38 +75,29 @@ version: u32, } -/// Newtype wrapper to provide a custom `Encodable` implementation. +/// Newtype wrapper to provide a custom `Serialize` implementation. 
/// The one from lockfile does not fit because it uses a non-standard /// format for `PackageId`s -struct MetadataResolve{ +#[derive(Serialize)] +struct MetadataResolve { + #[serde(rename = "nodes", serialize_with = "serialize_resolve")] resolve: Resolve, root: Option, } -impl Encodable for MetadataResolve { - fn encode(&self, s: &mut S) -> Result<(), S::Error> { - #[derive(RustcEncodable)] - struct EncodableResolve<'a> { - root: Option<&'a PackageId>, - nodes: Vec>, - } +fn serialize_resolve(resolve: &Resolve, s: S) -> Result + where S: ser::Serializer, +{ + #[derive(Serialize)] + struct Node<'a> { + id: &'a PackageId, + dependencies: Vec<&'a PackageId>, + } - #[derive(RustcEncodable)] - struct Node<'a> { - id: &'a PackageId, - dependencies: Vec<&'a PackageId>, + resolve.iter().map(|id| { + Node { + id: id, + dependencies: resolve.deps(id).collect(), } - - let encodable = EncodableResolve { - root: self.root.as_ref(), - nodes: self.resolve.iter().map(|id| { - Node { - id: id, - dependencies: self.resolve.deps(id).collect(), - } - }).collect(), - }; - - encodable.encode(s) - } + }).collect::>().serialize(s) } diff -Nru cargo-0.17.0/src/cargo/ops/cargo_package.rs cargo-0.19.0/src/cargo/ops/cargo_package.rs --- cargo-0.17.0/src/cargo/ops/cargo_package.rs 2017-03-03 19:21:26.000000000 +0000 +++ cargo-0.19.0/src/cargo/ops/cargo_package.rs 2017-05-16 03:23:10.000000000 +0000 @@ -173,7 +173,8 @@ if dirty.is_empty() { Ok(()) } else { - bail!("{} dirty files found in the working directory:\n\n{}\n\n\ + bail!("{} files in the working directory contain changes that were \ + not yet committed into git:\n\n{}\n\n\ to proceed despite this, pass the `--allow-dirty` flag", dirty.len(), dirty.join("\n")) } @@ -284,7 +285,7 @@ let new_pkg = Package::new(new_manifest, &manifest_path); // Now that we've rewritten all our path dependencies, compile it! 
- let ws = Workspace::ephemeral(new_pkg, config, None)?; + let ws = Workspace::ephemeral(new_pkg, config, None, true)?; ops::compile_ws(&ws, None, &ops::CompileOptions { config: config, jobs: opts.jobs, @@ -293,7 +294,7 @@ no_default_features: false, all_features: false, spec: ops::Packages::Packages(&[]), - filter: ops::CompileFilter::Everything, + filter: ops::CompileFilter::Everything { required_features_filterable: true }, release: false, message_format: ops::MessageFormat::Human, mode: ops::CompileMode::Build, diff -Nru cargo-0.17.0/src/cargo/ops/cargo_run.rs cargo-0.19.0/src/cargo/ops/cargo_run.rs --- cargo-0.17.0/src/cargo/ops/cargo_run.rs 2017-03-03 19:21:26.000000000 +0000 +++ cargo-0.19.0/src/cargo/ops/cargo_run.rs 2017-05-16 03:23:10.000000000 +0000 @@ -1,24 +1,36 @@ use std::path::Path; -use ops::{self, CompileFilter}; -use util::{self, CargoResult, ProcessError}; +use ops::{self, CompileFilter, Packages}; +use util::{self, human, CargoResult, ProcessError}; use core::Workspace; pub fn run(ws: &Workspace, options: &ops::CompileOptions, args: &[String]) -> CargoResult> { let config = ws.config(); - let root = ws.current()?; - let mut bins = root.manifest().targets().iter().filter(|a| { + let pkg = match options.spec { + Packages::All => unreachable!("cargo run supports single package only"), + Packages::Packages(xs) => match xs.len() { + 0 => ws.current()?, + 1 => ws.members() + .find(|pkg| pkg.name() == xs[0]) + .ok_or_else(|| human( + format!("package `{}` is not a member of the workspace", xs[0]) + ))?, + _ => unreachable!("cargo run supports single package only"), + } + }; + + let mut bins = pkg.manifest().targets().iter().filter(|a| { !a.is_lib() && !a.is_custom_build() && match options.filter { - CompileFilter::Everything => a.is_bin(), + CompileFilter::Everything { .. } => a.is_bin(), CompileFilter::Only { .. 
} => options.filter.matches(a), } }); if bins.next().is_none() { match options.filter { - CompileFilter::Everything => { + CompileFilter::Everything { .. } => { bail!("a bin target must be available for `cargo run`") } CompileFilter::Only { .. } => { @@ -28,7 +40,7 @@ } if bins.next().is_some() { match options.filter { - CompileFilter::Everything => { + CompileFilter::Everything { .. } => { bail!("`cargo run` requires that a project only have one \ executable; use the `--bin` option to specify which one \ to run") @@ -41,6 +53,7 @@ } let compile = ops::compile(ws, options)?; + assert_eq!(compile.binaries.len(), 1); let exe = &compile.binaries[0]; let exe = match util::without_prefix(&exe, config.cwd()) { Some(path) if path.file_name() == Some(path.as_os_str()) @@ -48,7 +61,7 @@ Some(path) => path.to_path_buf(), None => exe.to_path_buf(), }; - let mut process = compile.target_process(exe, &root)?; + let mut process = compile.target_process(exe, &pkg)?; process.args(args).cwd(config.cwd()); config.shell().status("Running", process.to_string())?; diff -Nru cargo-0.17.0/src/cargo/ops/cargo_rustc/compilation.rs cargo-0.19.0/src/cargo/ops/cargo_rustc/compilation.rs --- cargo-0.17.0/src/cargo/ops/cargo_rustc/compilation.rs 2017-03-03 19:21:26.000000000 +0000 +++ cargo-0.19.0/src/cargo/ops/cargo_rustc/compilation.rs 2017-05-16 03:23:10.000000000 +0000 @@ -3,7 +3,7 @@ use std::path::PathBuf; use semver::Version; -use core::{PackageId, Package, Target}; +use core::{PackageId, Package, Target, TargetKind}; use util::{self, CargoResult, Config, ProcessBuilder, process, join_paths}; /// A structure returning the result of a compilation. @@ -13,12 +13,12 @@ pub libraries: HashMap>, /// An array of all tests created during this compilation. - pub tests: Vec<(Package, String, PathBuf)>, + pub tests: Vec<(Package, TargetKind, String, PathBuf)>, /// An array of all binaries created. pub binaries: Vec, - /// All directires for the output of native build commands. 
+ /// All directories for the output of native build commands. /// /// This is currently used to drive some entries which are added to the /// LD_LIBRARY_PATH as appropriate. @@ -102,7 +102,10 @@ } else { let mut search_path = vec![]; - // Add -L arguments, after stripping off prefixes like "native=" or "framework=". + // Add -L arguments, after stripping off prefixes like "native=" + // or "framework=" and filtering out directories *not* inside our + // output directory, since they are likely spurious and can cause + // clashes with system shared libraries (issue #3366). for dir in self.native_dirs.iter() { let dir = match dir.to_str() { Some(s) => { @@ -118,7 +121,10 @@ } None => dir.clone(), }; - search_path.push(dir); + + if dir.starts_with(&self.root_output) { + search_path.push(dir); + } } search_path.push(self.root_output.clone()); search_path.push(self.deps_output.clone()); @@ -137,6 +143,13 @@ let metadata = pkg.manifest().metadata(); + let cargo_exe = self.config.cargo_exe()?; + cmd.env(::CARGO_ENV, cargo_exe); + + // When adding new environment variables depending on + // crate properties which might require rebuild upon change + // consider adding the corresponding properties to the hash + // in Context::target_metadata() cmd.env("CARGO_MANIFEST_DIR", pkg.root()) .env("CARGO_PKG_VERSION_MAJOR", &pkg.version().major.to_string()) .env("CARGO_PKG_VERSION_MINOR", &pkg.version().minor.to_string()) diff -Nru cargo-0.17.0/src/cargo/ops/cargo_rustc/context.rs cargo-0.19.0/src/cargo/ops/cargo_rustc/context.rs --- cargo-0.17.0/src/cargo/ops/cargo_rustc/context.rs 2017-03-03 19:21:26.000000000 +0000 +++ cargo-0.19.0/src/cargo/ops/cargo_rustc/context.rs 2017-05-16 03:23:10.000000000 +0000 @@ -11,7 +11,7 @@ use core::{Package, PackageId, PackageSet, Resolve, Target, Profile}; use core::{TargetKind, Profiles, Dependency, Workspace}; use core::dependency::Kind as DepKind; -use util::{self, CargoResult, ChainError, internal, Config, profile, Cfg, human}; +use 
util::{self, CargoResult, ChainError, internal, Config, profile, Cfg, CfgExpr, human}; use super::TargetConfig; use super::custom_build::{BuildState, BuildScripts}; @@ -78,7 +78,10 @@ // Enable incremental builds if the user opts in. For now, // this is an environment variable until things stabilize a // bit more. - let incremental_enabled = env::var("CARGO_INCREMENTAL").is_ok(); + let incremental_enabled = match env::var("CARGO_INCREMENTAL") { + Ok(v) => v == "1", + Err(_) => false, + }; Ok(Context { ws: ws, @@ -175,6 +178,7 @@ -> CargoResult<()> { let rustflags = env_args(self.config, &self.build_config, + &self.info(&kind), kind, "RUSTFLAGS")?; let mut process = self.config.rustc()?.process(); @@ -396,16 +400,16 @@ // to pull crates from anywhere w/o worrying about conflicts unit.pkg.package_id().hash(&mut hasher); + // Add package properties which map to environment variables + // exposed by Cargo + let manifest_metadata = unit.pkg.manifest().metadata(); + manifest_metadata.authors.hash(&mut hasher); + manifest_metadata.description.hash(&mut hasher); + manifest_metadata.homepage.hash(&mut hasher); + // Also mix in enabled features to our metadata. This'll ensure that // when changing feature sets each lib is separately cached. - match self.resolve.features(unit.pkg.package_id()) { - Some(features) => { - let mut feat_vec: Vec<&String> = features.iter().collect(); - feat_vec.sort(); - feat_vec.hash(&mut hasher); - } - None => Vec::<&String>::new().hash(&mut hasher), - } + self.resolve.features_sorted(unit.pkg.package_id()).hash(&mut hasher); // Throw in the profile we're compiling with. This helps caching // panic=abort and panic=unwind artifacts, additionally with various @@ -441,7 +445,7 @@ /// Returns a tuple with the directory and name of the hard link we expect /// our target to be copied to. 
Eg, file_stem may be out_dir/deps/foo-abcdef /// and link_stem would be out_dir/foo - /// This function returns it in two parts so the caller can add prefix/suffis + /// This function returns it in two parts so the caller can add prefix/suffix /// to filename separately /// Returns an Option because in some cases we don't want to link @@ -598,11 +602,8 @@ // If the dependency is optional, then we're only activating it // if the corresponding feature was activated - if d.is_optional() { - match self.resolve.features(id) { - Some(f) if f.contains(d.name()) => {} - _ => return false, - } + if d.is_optional() && !self.resolve.features(id).contains(d.name()) { + return false; } // If we've gotten past all that, then this dependency is @@ -646,7 +647,15 @@ // Integration tests/benchmarks require binaries to be built if unit.profile.test && (unit.target.is_test() || unit.target.is_bench()) { - ret.extend(unit.pkg.targets().iter().filter(|t| t.is_bin()).map(|t| { + ret.extend(unit.pkg.targets().iter().filter(|t| { + let no_required_features = Vec::new(); + + t.is_bin() && + // Skip binaries with required features that have not been selected. 
+ t.required_features().unwrap_or(&no_required_features).iter().all(|f| { + self.resolve.features(id).contains(f) + }) + }).map(|t| { Unit { pkg: unit.pkg, target: t, @@ -864,22 +873,30 @@ } pub fn rustflags_args(&self, unit: &Unit) -> CargoResult> { - env_args(self.config, &self.build_config, unit.kind, "RUSTFLAGS") + env_args(self.config, &self.build_config, self.info(&unit.kind), unit.kind, "RUSTFLAGS") } pub fn rustdocflags_args(&self, unit: &Unit) -> CargoResult> { - env_args(self.config, &self.build_config, unit.kind, "RUSTDOCFLAGS") + env_args(self.config, &self.build_config, self.info(&unit.kind), unit.kind, "RUSTDOCFLAGS") } pub fn show_warnings(&self, pkg: &PackageId) -> bool { pkg.source_id().is_path() || self.config.extra_verbose() } + + fn info(&self, kind: &Kind) -> &TargetInfo { + match *kind { + Kind::Host => &self.host_info, + Kind::Target => &self.target_info, + } + } } // Acquire extra flags to pass to the compiler from the // RUSTFLAGS environment variable and similar config values fn env_args(config: &Config, build_config: &BuildConfig, + target_info: &TargetInfo, kind: Kind, name: &str) -> CargoResult> { // We *want* to apply RUSTFLAGS only to builds for the @@ -920,13 +937,34 @@ return Ok(args.collect()); } + let mut rustflags = Vec::new(); + let name = name.chars().flat_map(|c| c.to_lowercase()).collect::(); - // Then the target.*.rustflags value + // Then the target.*.rustflags value... let target = build_config.requested_target.as_ref().unwrap_or(&build_config.host_triple); let key = format!("target.{}.{}", target, name); if let Some(args) = config.get_list_or_split_string(&key)? { let args = args.val.into_iter(); - return Ok(args.collect()); + rustflags.extend(args); + } + // ...including target.'cfg(...)'.rustflags + if let Some(ref target_cfg) = target_info.cfg { + if let Some(table) = config.get_table("target")? 
{ + let cfgs = table.val.iter().map(|(t, _)| (CfgExpr::from_str(t), t)) + .filter_map(|(c, n)| c.map(|c| (c, n)).ok()) + .filter(|&(ref c, _)| c.matches(target_cfg)); + for (_, n) in cfgs { + let key = format!("target.'{}'.{}", n, name); + if let Some(args) = config.get_list_or_split_string(&key)? { + let args = args.val.into_iter(); + rustflags.extend(args); + } + } + } + } + + if !rustflags.is_empty() { + return Ok(rustflags); } // Then the build.rustflags value diff -Nru cargo-0.17.0/src/cargo/ops/cargo_rustc/custom_build.rs cargo-0.19.0/src/cargo/ops/cargo_rustc/custom_build.rs --- cargo-0.17.0/src/cargo/ops/cargo_rustc/custom_build.rs 2017-03-03 19:21:26.000000000 +0000 +++ cargo-0.19.0/src/cargo/ops/cargo_rustc/custom_build.rs 2017-05-16 03:23:10.000000000 +0000 @@ -120,10 +120,8 @@ // Be sure to pass along all enabled features for this package, this is the // last piece of statically known information that we have. - if let Some(features) = cx.resolve.features(unit.pkg.package_id()) { - for feat in features.iter() { - cmd.env(&format!("CARGO_FEATURE_{}", super::envify(feat)), "1"); - } + for feat in cx.resolve.features(unit.pkg.package_id()).iter() { + cmd.env(&format!("CARGO_FEATURE_{}", super::envify(feat)), "1"); } let mut cfg_map = HashMap::new(); @@ -171,7 +169,7 @@ let kind = unit.kind; let json_messages = cx.build_config.json_messages; - // Check to see if the build script as already run, and if it has keep + // Check to see if the build script has already run, and if it has keep // track of whether it has told us about some explicit dependencies let prev_output = BuildOutput::parse_file(&output_file, &pkg_name).ok(); let rerun_if_changed = match prev_output { @@ -352,10 +350,10 @@ match key { "rustc-flags" => { - let (libs, links) = + let (paths, links) = BuildOutput::parse_rustc_flags(value, &whence)?; library_links.extend(links.into_iter()); - library_paths.extend(libs.into_iter()); + library_paths.extend(paths.into_iter()); } "rustc-link-lib" => 
library_links.push(value.to_string()), "rustc-link-search" => library_paths.push(PathBuf::from(value)), @@ -381,7 +379,7 @@ let value = value.trim(); let mut flags_iter = value.split(|c: char| c.is_whitespace()) .filter(|w| w.chars().any(|c| !c.is_whitespace())); - let (mut library_links, mut library_paths) = (Vec::new(), Vec::new()); + let (mut library_paths, mut library_links) = (Vec::new(), Vec::new()); loop { let flag = match flags_iter.next() { Some(f) => f, diff -Nru cargo-0.17.0/src/cargo/ops/cargo_rustc/fingerprint.rs cargo-0.19.0/src/cargo/ops/cargo_rustc/fingerprint.rs --- cargo-0.17.0/src/cargo/ops/cargo_rustc/fingerprint.rs 2017-03-03 19:21:26.000000000 +0000 +++ cargo-0.19.0/src/cargo/ops/cargo_rustc/fingerprint.rs 2017-05-16 03:23:10.000000000 +0000 @@ -6,7 +6,9 @@ use std::sync::{Arc, Mutex}; use filetime::FileTime; -use rustc_serialize::{json, Encodable, Decodable, Encoder, Decoder}; +use serde::ser::{self, Serialize}; +use serde::de::{self, Deserialize}; +use serde_json; use core::{Package, TargetKind}; use util; @@ -125,18 +127,48 @@ /// `DependencyQueue`, but it also needs to be retained here because Cargo can /// be interrupted while executing, losing the state of the `DependencyQueue` /// graph. 
+#[derive(Serialize, Deserialize)] pub struct Fingerprint { rustc: u64, features: String, target: u64, profile: u64, + #[serde(serialize_with = "serialize_deps", deserialize_with = "deserialize_deps")] deps: Vec<(String, Arc)>, local: LocalFingerprint, + #[serde(skip_serializing, skip_deserializing)] memoized_hash: Mutex>, rustflags: Vec, } -#[derive(RustcEncodable, RustcDecodable, Hash)] +fn serialize_deps(deps: &Vec<(String, Arc)>, ser: S) + -> Result + where S: ser::Serializer, +{ + deps.iter().map(|&(ref a, ref b)| { + (a, b.hash()) + }).collect::>().serialize(ser) +} + +fn deserialize_deps(d: D) -> Result)>, D::Error> + where D: de::Deserializer, +{ + let decoded = >::deserialize(d)?; + Ok(decoded.into_iter().map(|(name, hash)| { + (name, Arc::new(Fingerprint { + rustc: 0, + target: 0, + profile: 0, + local: LocalFingerprint::Precalculated(String::new()), + features: String::new(), + deps: Vec::new(), + memoized_hash: Mutex::new(Some(hash)), + rustflags: Vec::new(), + })) + }).collect()) +} + +#[derive(Serialize, Deserialize, Hash)] enum LocalFingerprint { Precalculated(String), MtimeBased(MtimeSlot, PathBuf), @@ -242,79 +274,27 @@ } } -impl Encodable for Fingerprint { - fn encode(&self, e: &mut E) -> Result<(), E::Error> { - e.emit_struct("Fingerprint", 6, |e| { - e.emit_struct_field("rustc", 0, |e| self.rustc.encode(e))?; - e.emit_struct_field("target", 1, |e| self.target.encode(e))?; - e.emit_struct_field("profile", 2, |e| self.profile.encode(e))?; - e.emit_struct_field("local", 3, |e| self.local.encode(e))?; - e.emit_struct_field("features", 4, |e| { - self.features.encode(e) - })?; - e.emit_struct_field("deps", 5, |e| { - self.deps.iter().map(|&(ref a, ref b)| { - (a, b.hash()) - }).collect::>().encode(e) - })?; - e.emit_struct_field("rustflags", 6, |e| self.rustflags.encode(e))?; - Ok(()) - }) - } -} - -impl Decodable for Fingerprint { - fn decode(d: &mut D) -> Result { - fn decode(d: &mut D) -> Result { - Decodable::decode(d) - } - 
d.read_struct("Fingerprint", 6, |d| { - Ok(Fingerprint { - rustc: d.read_struct_field("rustc", 0, decode)?, - target: d.read_struct_field("target", 1, decode)?, - profile: d.read_struct_field("profile", 2, decode)?, - local: d.read_struct_field("local", 3, decode)?, - features: d.read_struct_field("features", 4, decode)?, - memoized_hash: Mutex::new(None), - deps: { - let decode = decode::, D>; - let v = d.read_struct_field("deps", 5, decode)?; - v.into_iter().map(|(name, hash)| { - (name, Arc::new(Fingerprint { - rustc: 0, - target: 0, - profile: 0, - local: LocalFingerprint::Precalculated(String::new()), - features: String::new(), - deps: Vec::new(), - memoized_hash: Mutex::new(Some(hash)), - rustflags: Vec::new(), - })) - }).collect() - }, - rustflags: d.read_struct_field("rustflags", 6, decode)?, - }) - }) - } -} - impl hash::Hash for MtimeSlot { fn hash(&self, h: &mut H) { self.0.lock().unwrap().hash(h) } } -impl Encodable for MtimeSlot { - fn encode(&self, e: &mut E) -> Result<(), E::Error> { +impl ser::Serialize for MtimeSlot { + fn serialize(&self, s: S) -> Result + where S: ser::Serializer, + { self.0.lock().unwrap().map(|ft| { (ft.seconds_relative_to_1970(), ft.nanoseconds()) - }).encode(e) + }).serialize(s) } } -impl Decodable for MtimeSlot { - fn decode(e: &mut D) -> Result { - let kind: Option<(u64, u32)> = Decodable::decode(e)?; +impl de::Deserialize for MtimeSlot { + fn deserialize(d: D) -> Result + where D: de::Deserializer, + { + let kind: Option<(u64, u32)> = de::Deserialize::deserialize(d)?; Ok(MtimeSlot(Mutex::new(kind.map(|(s, n)| { FileTime::from_seconds_since_1970(s, n) })))) @@ -339,16 +319,6 @@ return Ok(s.clone()) } - // First, calculate all statically known "salt data" such as the profile - // information (compiler flags), the compiler version, activated features, - // and target configuration. 
- let features = cx.resolve.features(unit.pkg.package_id()); - let features = features.map(|s| { - let mut v = s.iter().collect::>(); - v.sort(); - v - }); - // Next, recursively calculate the fingerprint for all of our dependencies. // // Skip the fingerprints of build scripts as they may not always be @@ -385,7 +355,7 @@ rustc: util::hash_u64(&cx.config.rustc()?.verbose_version), target: util::hash_u64(&unit.target), profile: util::hash_u64(&unit.profile), - features: format!("{:?}", features), + features: format!("{:?}", cx.resolve.features_sorted(unit.pkg.package_id())), deps: deps, local: local, memoized_hash: Mutex::new(None), @@ -507,25 +477,21 @@ debug!("write fingerprint: {}", loc.display()); paths::write(&loc, util::to_hex(hash).as_bytes())?; paths::write(&loc.with_extension("json"), - json::encode(&fingerprint).unwrap().as_bytes())?; + &serde_json::to_vec(&fingerprint).unwrap())?; Ok(()) } -/// Prepare work for when a package starts to build +/// Prepare for work when a package starts to build pub fn prepare_init(cx: &mut Context, unit: &Unit) -> CargoResult<()> { let new1 = cx.fingerprint_dir(unit); - let new2 = new1.clone(); if fs::metadata(&new1).is_err() { fs::create_dir(&new1)?; } - if fs::metadata(&new2).is_err() { - fs::create_dir(&new2)?; - } + Ok(()) } -/// Returns the (old, new) location for the dep info file of a target. 
pub fn dep_info_loc(cx: &mut Context, unit: &Unit) -> PathBuf { cx.fingerprint_dir(unit).join(&format!("dep-{}", filename(cx, unit))) } @@ -540,7 +506,7 @@ } let old_fingerprint_json = paths::read(&loc.with_extension("json"))?; - let old_fingerprint = json::decode(&old_fingerprint_json).chain_error(|| { + let old_fingerprint = serde_json::from_str(&old_fingerprint_json).chain_error(|| { internal(format!("failed to deserialize json")) })?; new_fingerprint.compare(&old_fingerprint) @@ -587,16 +553,13 @@ let mut paths = Vec::new(); let mut deps = deps.split(' ').map(|s| s.trim()).filter(|s| !s.is_empty()); - loop { - let mut file = match deps.next() { - Some(s) => s.to_string(), - None => break, - }; - while file.ends_with("\\") { + while let Some(s) = deps.next() { + let mut file = s.to_string(); + while file.ends_with('\\') { file.pop(); file.push(' '); file.push_str(deps.next().chain_error(|| { - internal(format!("malformed dep-info format, trailing \\")) + internal("malformed dep-info format, trailing \\".to_string()) })?); } paths.push(cwd.join(&file)); @@ -606,7 +569,7 @@ fn dep_info_mtime_if_fresh(dep_info: &Path) -> CargoResult> { if let Some(paths) = parse_dep_info(dep_info)? 
{ - Ok(mtime_if_fresh(&dep_info, paths.iter())) + Ok(mtime_if_fresh(dep_info, paths.iter())) } else { Ok(None) } diff -Nru cargo-0.17.0/src/cargo/ops/cargo_rustc/job_queue.rs cargo-0.19.0/src/cargo/ops/cargo_rustc/job_queue.rs --- cargo-0.17.0/src/cargo/ops/cargo_rustc/job_queue.rs 2017-03-03 19:21:26.000000000 +0000 +++ cargo-0.19.0/src/cargo/ops/cargo_rustc/job_queue.rs 2017-05-16 03:23:10.000000000 +0000 @@ -182,6 +182,9 @@ match result { Ok(()) => self.finish(key, cx)?, Err(e) => { + let msg = "The following warnings were emitted during compilation:"; + self.emit_warnings(Some(msg), key, cx)?; + if self.active > 0 { error = Some(human("build failed")); handle_error(&*e, &mut *cx.config.shell()); @@ -189,6 +192,7 @@ "Build failed, waiting for other \ jobs to finish...", YELLOW)?; } + if error.is_none() { error = Some(e); } @@ -252,15 +256,33 @@ Ok(()) } - fn finish(&mut self, key: Key<'a>, cx: &mut Context) -> CargoResult<()> { - if key.profile.run_custom_build && cx.show_warnings(key.pkg) { - let output = cx.build_state.outputs.lock().unwrap(); - if let Some(output) = output.get(&(key.pkg.clone(), key.kind)) { - for warning in output.warnings.iter() { - cx.config.shell().warn(warning)?; + fn emit_warnings(&self, msg: Option<&str>, key: Key<'a>, cx: &mut Context) -> CargoResult<()> { + let output = cx.build_state.outputs.lock().unwrap(); + if let Some(output) = output.get(&(key.pkg.clone(), key.kind)) { + if let Some(msg) = msg { + if !output.warnings.is_empty() { + writeln!(cx.config.shell().err(), "{}\n", msg)?; } } + + for warning in output.warnings.iter() { + cx.config.shell().warn(warning)?; + } + + if !output.warnings.is_empty() && msg.is_some() { + // Output an empty line. 
+ writeln!(cx.config.shell().err(), "")?; + } } + + Ok(()) + } + + fn finish(&mut self, key: Key<'a>, cx: &mut Context) -> CargoResult<()> { + if key.profile.run_custom_build && cx.show_warnings(key.pkg) { + self.emit_warnings(None, key, cx)?; + } + let state = self.pending.get_mut(&key).unwrap(); state.amt -= 1; if state.amt == 0 { diff -Nru cargo-0.17.0/src/cargo/ops/cargo_rustc/layout.rs cargo-0.19.0/src/cargo/ops/cargo_rustc/layout.rs --- cargo-0.17.0/src/cargo/ops/cargo_rustc/layout.rs 2017-03-03 19:21:26.000000000 +0000 +++ cargo-0.19.0/src/cargo/ops/cargo_rustc/layout.rs 2017-05-16 03:23:10.000000000 +0000 @@ -76,7 +76,7 @@ // the target triple as a Path and then just use the file stem as the // component for the directory name. if let Some(triple) = triple { - path.push(Path::new(triple).file_stem().ok_or(human(format!("target was empty")))?); + path.push(Path::new(triple).file_stem().ok_or(human("target was empty".to_string()))?); } path.push(dest); Layout::at(ws.config(), path) diff -Nru cargo-0.17.0/src/cargo/ops/cargo_rustc/mod.rs cargo-0.19.0/src/cargo/ops/cargo_rustc/mod.rs --- cargo-0.17.0/src/cargo/ops/cargo_rustc/mod.rs 2017-03-03 19:21:26.000000000 +0000 +++ cargo-0.19.0/src/cargo/ops/cargo_rustc/mod.rs 2017-05-16 03:23:10.000000000 +0000 @@ -6,7 +6,7 @@ use std::path::{self, PathBuf}; use std::sync::Arc; -use rustc_serialize::json; +use serde_json; use core::{Package, PackageId, PackageSet, Target, Resolve}; use core::{Profile, Profiles, Workspace}; @@ -75,9 +75,9 @@ cmd: ProcessBuilder, _id: &PackageId, handle_stdout: &mut FnMut(&str) -> CargoResult<()>, - handle_srderr: &mut FnMut(&str) -> CargoResult<()>) + handle_stderr: &mut FnMut(&str) -> CargoResult<()>) -> Result<(), ProcessError> { - cmd.exec_with_streaming(handle_stdout, handle_srderr)?; + cmd.exec_with_streaming(handle_stdout, handle_stderr)?; Ok(()) } } @@ -147,6 +147,7 @@ if unit.profile.test { cx.compilation.tests.push((unit.pkg.clone(), + unit.target.kind().clone(), 
unit.target.name().to_string(), dst)); } else if unit.target.is_bin() || unit.target.is_example() { @@ -180,18 +181,17 @@ })); } - if let Some(feats) = cx.resolve.features(&unit.pkg.package_id()) { - cx.compilation.cfgs.entry(unit.pkg.package_id().clone()) - .or_insert(HashSet::new()) - .extend(feats.iter().map(|feat| format!("feature=\"{}\"", feat))); - } + let feats = cx.resolve.features(&unit.pkg.package_id()); + cx.compilation.cfgs.entry(unit.pkg.package_id().clone()) + .or_insert_with(HashSet::new) + .extend(feats.iter().map(|feat| format!("feature=\"{}\"", feat))); output_depinfo(&mut cx, unit)?; } for (&(ref pkg, _), output) in cx.build_state.outputs.lock().unwrap().iter() { cx.compilation.cfgs.entry(pkg.clone()) - .or_insert(HashSet::new()) + .or_insert_with(HashSet::new) .extend(output.cfgs.iter().cloned()); for dir in output.library_paths.iter() { @@ -229,11 +229,9 @@ } else { rustc(cx, unit, exec.clone())? }; - let link_work1 = link_targets(cx, unit)?; - let link_work2 = link_targets(cx, unit)?; // Need to link targets on both the dirty and fresh - let dirty = work.then(link_work1).then(dirty); - let fresh = link_work2.then(fresh); + let dirty = work.then(link_targets(cx, unit, false)?).then(dirty); + let fresh = link_targets(cx, unit, true)?.then(fresh); (dirty, fresh, freshness) }; jobs.enqueue(cx, unit, Job::new(dirty, fresh), freshness)?; @@ -252,12 +250,16 @@ let mut rustc = prepare_rustc(cx, crate_types, unit)?; let name = unit.pkg.name().to_string(); + + // If this is an upstream dep we don't want warnings from, turn off all + // lints. if !cx.show_warnings(unit.pkg.package_id()) { - if cx.config.rustc()?.cap_lints { - rustc.arg("--cap-lints").arg("allow"); - } else { - rustc.arg("-Awarnings"); - } + rustc.arg("--cap-lints").arg("allow"); + + // If this is an upstream dep but we *do* want warnings, make sure that they + // don't fail compilation. 
+ } else if !unit.pkg.package_id().source_id().is_path() { + rustc.arg("--cap-lints").arg("warn"); } let filenames = cx.target_filenames(unit)?; @@ -290,12 +292,6 @@ let json_messages = cx.build_config.json_messages; let package_id = unit.pkg.package_id().clone(); let target = unit.target.clone(); - let profile = unit.profile.clone(); - let features = cx.resolve.features(unit.pkg.package_id()) - .into_iter() - .flat_map(|i| i) - .map(|s| s.to_string()) - .collect::>(); exec.init(cx); let exec = exec.clone(); @@ -342,9 +338,9 @@ }, &mut |line| { // stderr from rustc can have a mix of JSON and non-JSON output - if line.starts_with("{") { + if line.starts_with('{') { // Handle JSON lines - let compiler_message = json::Json::from_str(line).map_err(|_| { + let compiler_message = serde_json::from_str(line).map_err(|_| { internal(&format!("compiler produced invalid json: `{}`", line)) })?; @@ -389,18 +385,6 @@ fingerprint::append_current_dir(&dep_info_loc, &cwd)?; } - if json_messages { - machine_message::emit(machine_message::Artifact { - package_id: &package_id, - target: &target, - profile: &profile, - features: features, - filenames: filenames.iter().map(|&(ref src, _, _)| { - src.display().to_string() - }).collect(), - }); - } - Ok(()) })); @@ -436,21 +420,36 @@ /// Link the compiled target (often of form foo-{metadata_hash}) to the /// final target. This must happen during both "Fresh" and "Compile" -fn link_targets(cx: &mut Context, unit: &Unit) -> CargoResult { +fn link_targets(cx: &mut Context, unit: &Unit, fresh: bool) -> CargoResult { let filenames = cx.target_filenames(unit)?; + let package_id = unit.pkg.package_id().clone(); + let target = unit.target.clone(); + let profile = unit.profile.clone(); + let features = cx.resolve.features_sorted(&package_id).into_iter() + .map(|s| s.to_owned()) + .collect(); + let json_messages = cx.build_config.json_messages; + Ok(Work::new(move |_| { // If we're a "root crate", e.g. 
the target of this compilation, then we // hard link our outputs out of the `deps` directory into the directory // above. This means that `cargo build` will produce binaries in // `target/debug` which one probably expects. - for (src, link_dst, _linkable) in filenames { + let mut destinations = vec![]; + for &(ref src, ref link_dst, _linkable) in filenames.iter() { // This may have been a `cargo rustc` command which changes the // output, so the source may not actually exist. - debug!("Thinking about linking {} to {:?}", src.display(), link_dst); - if !src.exists() || link_dst.is_none() { + if !src.exists() { continue } - let dst = link_dst.unwrap(); + let dst = match link_dst.as_ref() { + Some(dst) => dst, + None => { + destinations.push(src.display().to_string()); + continue; + } + }; + destinations.push(dst.display().to_string()); debug!("linking {} to {}", src.display(), dst.display()); if dst.exists() { @@ -458,16 +457,27 @@ human(format!("failed to remove: {}", dst.display())) })?; } - fs::hard_link(&src, &dst) + fs::hard_link(src, dst) .or_else(|err| { debug!("hard link failed {}. 
falling back to fs::copy", err); - fs::copy(&src, &dst).map(|_| ()) + fs::copy(src, dst).map(|_| ()) }) .chain_error(|| { human(format!("failed to link or copy `{}` to `{}`", src.display(), dst.display())) })?; } + + if json_messages { + machine_message::emit(machine_message::Artifact { + package_id: &package_id, + target: &target, + profile: &profile, + features: features, + filenames: destinations, + fresh: fresh, + }); + } Ok(()) })) } @@ -531,10 +541,8 @@ rustdoc.arg("-o").arg(doc_dir); - if let Some(features) = cx.resolve.features(unit.pkg.package_id()) { - for feat in features { - rustdoc.arg("--cfg").arg(&format!("feature=\"{}\"", feat)); - } + for feat in cx.resolve.features(unit.pkg.package_id()) { + rustdoc.arg("--cfg").arg(&format!("feature=\"{}\"", feat)); } if let Some(ref args) = unit.profile.rustdoc_args { @@ -589,8 +597,8 @@ crate_types: &[&str]) { let Profile { ref opt_level, lto, codegen_units, ref rustc_args, debuginfo, - debug_assertions, rpath, test, doc: _doc, run_custom_build, - ref panic, rustdoc_args: _, check, + debug_assertions, overflow_checks, rpath, test, doc: _doc, + run_custom_build, ref panic, rustdoc_args: _, check, } = *unit.profile; assert!(!run_custom_build); @@ -670,10 +678,27 @@ cmd.args(args); } - if debug_assertions && opt_level != "0" { - cmd.args(&["-C", "debug-assertions=on"]); - } else if !debug_assertions && opt_level == "0" { - cmd.args(&["-C", "debug-assertions=off"]); + // -C overflow-checks is implied by the setting of -C debug-assertions, + // so we only need to provide -C overflow-checks if it differs from + // the value of -C debug-assertions we would provide. 
+ if opt_level != "0" { + if debug_assertions { + cmd.args(&["-C", "debug-assertions=on"]); + if !overflow_checks { + cmd.args(&["-C", "overflow-checks=off"]); + } + } else if overflow_checks { + cmd.args(&["-C", "overflow-checks=on"]); + } + } else { + if !debug_assertions { + cmd.args(&["-C", "debug-assertions=off"]); + if overflow_checks { + cmd.args(&["-C", "overflow-checks=on"]); + } + } else if !overflow_checks { + cmd.args(&["-C", "overflow-checks=off"]); + } } if test && unit.target.harness() { @@ -682,10 +707,8 @@ cmd.arg("--cfg").arg("test"); } - if let Some(features) = cx.resolve.features(unit.pkg.package_id()) { - for feat in features.iter() { - cmd.arg("--cfg").arg(&format!("feature=\"{}\"", feat)); - } + for feat in cx.resolve.features(unit.pkg.package_id()).iter() { + cmd.arg("--cfg").arg(&format!("feature=\"{}\"", feat)); } match cx.target_metadata(unit) { diff -Nru cargo-0.17.0/src/cargo/ops/cargo_rustc/output_depinfo.rs cargo-0.19.0/src/cargo/ops/cargo_rustc/output_depinfo.rs --- cargo-0.17.0/src/cargo/ops/cargo_rustc/output_depinfo.rs 2017-03-03 19:21:26.000000000 +0000 +++ cargo-0.19.0/src/cargo/ops/cargo_rustc/output_depinfo.rs 2017-05-16 03:23:10.000000000 +0000 @@ -22,7 +22,7 @@ fn add_deps_for_unit<'a, 'b>(deps: &mut HashSet, context: &mut Context<'a, 'b>, unit: &Unit<'a>, visited: &mut HashSet>) -> CargoResult<()> { - if !visited.insert(unit.clone()) { + if !visited.insert(*unit) { return Ok(()); } @@ -76,13 +76,10 @@ // dep-info generation failed, so delete output file. This will usually // cause the build system to always rerun the build rule, which is correct // if inefficient. 
- match fs::remove_file(output_path) { - Err(err) => { - if err.kind() != ErrorKind::NotFound { - return Err(err.into()); - } + if let Err(err) = fs::remove_file(output_path) { + if err.kind() != ErrorKind::NotFound { + return Err(err.into()); } - _ => () } } } diff -Nru cargo-0.17.0/src/cargo/ops/cargo_test.rs cargo-0.19.0/src/cargo/ops/cargo_test.rs --- cargo-0.17.0/src/cargo/ops/cargo_test.rs 2017-03-03 19:21:26.000000000 +0000 +++ cargo-0.19.0/src/cargo/ops/cargo_test.rs 2017-05-16 03:23:10.000000000 +0000 @@ -1,7 +1,7 @@ use std::ffi::{OsString, OsStr}; use ops::{self, Compilation}; -use util::{self, CargoResult, CargoTestError, ProcessError}; +use util::{self, CargoResult, CargoTestError, Test, ProcessError}; use core::Workspace; pub struct TestOptions<'a> { @@ -19,7 +19,7 @@ if options.no_run { return Ok(None) } - let mut errors = if options.only_doc { + let (test, mut errors) = if options.only_doc { run_doc_tests(options, test_args, &compilation)? } else { run_unit_tests(options, test_args, &compilation)? @@ -27,7 +27,7 @@ // If we have an error and want to fail fast, return if !errors.is_empty() && !options.no_fail_fast { - return Ok(Some(CargoTestError::new(errors))) + return Ok(Some(CargoTestError::new(test, errors))) } // If a specific test was requested or we're not running any tests at all, @@ -35,15 +35,16 @@ if let ops::CompileFilter::Only { .. 
} = options.compile_opts.filter { match errors.len() { 0 => return Ok(None), - _ => return Ok(Some(CargoTestError::new(errors))) + _ => return Ok(Some(CargoTestError::new(test, errors))) } } - errors.extend(run_doc_tests(options, test_args, &compilation)?); + let (doctest, docerrors) = run_doc_tests(options, test_args, &compilation)?; + errors.extend(docerrors); if errors.is_empty() { Ok(None) } else { - Ok(Some(CargoTestError::new(errors))) + Ok(Some(CargoTestError::new(doctest, errors))) } } @@ -57,10 +58,10 @@ if options.no_run { return Ok(None) } - let errors = run_unit_tests(options, &args, &compilation)?; + let (test, errors) = run_unit_tests(options, &args, &compilation)?; match errors.len() { 0 => Ok(None), - _ => Ok(Some(CargoTestError::new(errors))), + _ => Ok(Some(CargoTestError::new(test, errors))), } } @@ -69,7 +70,7 @@ -> CargoResult> { let mut compilation = ops::compile(ws, &options.compile_opts)?; compilation.tests.sort_by(|a, b| { - (a.0.package_id(), &a.1).cmp(&(b.0.package_id(), &b.1)) + (a.0.package_id(), &a.1, &a.2).cmp(&(b.0.package_id(), &b.1, &b.2)) }); Ok(compilation) } @@ -78,14 +79,14 @@ fn run_unit_tests(options: &TestOptions, test_args: &[String], compilation: &Compilation) - -> CargoResult> { + -> CargoResult<(Test, Vec)> { let config = options.compile_opts.config; let cwd = options.compile_opts.config.cwd(); let mut errors = Vec::new(); - for &(ref pkg, _, ref exe) in &compilation.tests { - let to_display = match util::without_prefix(exe, &cwd) { + for &(ref pkg, ref kind, ref test, ref exe) in &compilation.tests { + let to_display = match util::without_prefix(exe, cwd) { Some(path) => path, None => &**exe, }; @@ -101,23 +102,23 @@ if let Err(e) = cmd.exec() { errors.push(e); if !options.no_fail_fast { - break + return Ok((Test::UnitTest(kind.clone(), test.clone()), errors)) } } } - Ok(errors) + Ok((Test::Multiple, errors)) } fn run_doc_tests(options: &TestOptions, test_args: &[String], compilation: &Compilation) - -> CargoResult> { + 
-> CargoResult<(Test, Vec)> { let mut errors = Vec::new(); let config = options.compile_opts.config; // We don't build/rust doctests if target != host if config.rustc()?.host != compilation.target { - return Ok(errors); + return Ok((Test::Doc, errors)); } let libs = compilation.to_doc_test.iter().map(|package| { @@ -141,11 +142,11 @@ p.arg("-L").arg(native_dep); } - if test_args.len() > 0 { - p.arg("--test-args").arg(&test_args.join(" ")); + for arg in test_args { + p.arg("--test-args").arg(arg); } - if let Some(cfgs) = compilation.cfgs.get(&package.package_id()) { + if let Some(cfgs) = compilation.cfgs.get(package.package_id()) { for cfg in cfgs.iter() { p.arg("--cfg").arg(cfg); } @@ -179,10 +180,10 @@ if let Err(e) = p.exec() { errors.push(e); if !options.no_fail_fast { - return Ok(errors); + return Ok((Test::Doc, errors)); } } } } - Ok(errors) + Ok((Test::Doc, errors)) } diff -Nru cargo-0.17.0/src/cargo/ops/lockfile.rs cargo-0.19.0/src/cargo/ops/lockfile.rs --- cargo-0.17.0/src/cargo/ops/lockfile.rs 2017-03-03 19:21:26.000000000 +0000 +++ cargo-0.19.0/src/cargo/ops/lockfile.rs 2017-05-16 03:23:10.000000000 +0000 @@ -1,7 +1,6 @@ use std::io::prelude::*; -use rustc_serialize::{Encodable, Decodable}; -use toml::{self, Encoder, Value}; +use toml; use core::{Resolve, resolver, Workspace}; use core::resolver::WorkspaceResolve; @@ -22,10 +21,8 @@ })?; (|| { - let table = cargo_toml::parse(&s, f.path(), ws.config())?; - let table = toml::Value::Table(table); - let mut d = toml::Decoder::new(table); - let v: resolver::EncodableResolve = Decodable::decode(&mut d)?; + let resolve = cargo_toml::parse(&s, f.path(), ws.config())?; + let v: resolver::EncodableResolve = resolve.try_into()?; Ok(Some(v.into_resolve(ws)?)) }).chain_error(|| { human(format!("failed to parse lock file at: {}", f.path().display())) @@ -50,24 +47,23 @@ true }; - let mut e = Encoder::new(); - WorkspaceResolve { + let toml = toml::Value::try_from(WorkspaceResolve { ws: ws, resolve: resolve, 
use_root_key: use_root_key, - }.encode(&mut e).unwrap(); + }).unwrap(); let mut out = String::new(); // Note that we do not use e.toml.to_string() as we want to control the // exact format the toml is in to ensure pretty diffs between updates to the // lockfile. - if let Some(root) = e.toml.get(&"root".to_string()) { + if let Some(root) = toml.get("root") { out.push_str("[root]\n"); emit_package(root.as_table().unwrap(), &mut out); } - let deps = e.toml.get(&"package".to_string()).unwrap().as_slice().unwrap(); + let deps = toml["package"].as_array().unwrap(); for dep in deps.iter() { let dep = dep.as_table().unwrap(); @@ -75,12 +71,9 @@ emit_package(dep, &mut out); } - match e.toml.get(&"metadata".to_string()) { - Some(metadata) => { - out.push_str("[metadata]\n"); - out.push_str(&metadata.to_string()); - } - None => {} + if let Some(meta) = toml.get("metadata") { + out.push_str("[metadata]\n"); + out.push_str(&meta.to_string()); } // If the lockfile contents haven't changed so don't rewrite it. 
This is @@ -120,16 +113,16 @@ } } -fn emit_package(dep: &toml::Table, out: &mut String) { - out.push_str(&format!("name = {}\n", lookup(dep, "name"))); - out.push_str(&format!("version = {}\n", lookup(dep, "version"))); +fn emit_package(dep: &toml::value::Table, out: &mut String) { + out.push_str(&format!("name = {}\n", &dep["name"])); + out.push_str(&format!("version = {}\n", &dep["version"])); if dep.contains_key("source") { - out.push_str(&format!("source = {}\n", lookup(dep, "source"))); + out.push_str(&format!("source = {}\n", &dep["source"])); } if let Some(ref s) = dep.get("dependencies") { - let slice = Value::as_slice(*s).unwrap(); + let slice = s.as_array().unwrap(); if !slice.is_empty() { out.push_str("dependencies = [\n"); @@ -142,10 +135,6 @@ } out.push_str("\n"); } else if dep.contains_key("replace") { - out.push_str(&format!("replace = {}\n\n", lookup(dep, "replace"))); + out.push_str(&format!("replace = {}\n\n", &dep["replace"])); } } - -fn lookup<'a>(table: &'a toml::Table, key: &str) -> &'a toml::Value { - table.get(key).expect(&format!("didn't find {}", key)) -} diff -Nru cargo-0.17.0/src/cargo/ops/registry.rs cargo-0.19.0/src/cargo/ops/registry.rs --- cargo-0.17.0/src/cargo/ops/registry.rs 2017-03-03 19:21:26.000000000 +0000 +++ cargo-0.19.0/src/cargo/ops/registry.rs 2017-05-16 03:23:10.000000000 +0000 @@ -5,7 +5,7 @@ use std::path::PathBuf; use std::time::Duration; -use curl::easy::Easy; +use curl::easy::{Easy, SslOpt}; use git2; use registry::{Registry, NewCrate, NewCrateDependency}; use term::color::BLACK; @@ -51,7 +51,7 @@ let (mut registry, reg_id) = registry(opts.config, opts.token.clone(), opts.index.clone())?; - verify_dependencies(&pkg, ®_id)?; + verify_dependencies(pkg, ®_id)?; // Prepare a tarball, with a non-surpressable warning if metadata // is missing since this is being put online. 
@@ -66,7 +66,7 @@ // Upload said tarball to the specified destination opts.config.shell().status("Uploading", pkg.package_id().to_string())?; - transmit(opts.config, &pkg, tarball.file(), &mut registry, opts.dry_run)?; + transmit(opts.config, pkg, tarball.file(), &mut registry, opts.dry_run)?; Ok(()) } @@ -121,13 +121,10 @@ Some(ref readme) => Some(paths::read(&pkg.root().join(readme))?), None => None, }; - match *license_file { - Some(ref file) => { - if fs::metadata(&pkg.root().join(file)).is_err() { - bail!("the license file `{}` does not exist", file) - } + if let Some(ref file) = *license_file { + if fs::metadata(&pkg.root().join(file)).is_err() { + bail!("the license file `{}` does not exist", file) } - None => {} } // Do not upload if performing a dry run @@ -234,6 +231,9 @@ if let Some(cainfo) = config.get_path("http.cainfo")? { handle.cainfo(&cainfo.val)?; } + if let Some(check) = config.get_bool("http.check-revoke")? { + handle.ssl_options(SslOpt::new().no_revoke(!check.val))?; + } if let Some(timeout) = http_timeout(config)? { handle.connect_timeout(Duration::new(timeout as u64, 0))?; handle.low_speed_time(Duration::new(timeout as u64, 0))?; @@ -246,18 +246,13 @@ /// Favor cargo's `http.proxy`, then git's `http.proxy`. Proxies specified /// via environment variables are picked up by libcurl. fn http_proxy(config: &Config) -> CargoResult> { - match config.get_string("http.proxy")? { - Some(s) => return Ok(Some(s.val)), - None => {} - } - match git2::Config::open_default() { - Ok(cfg) => { - match cfg.get_str("http.proxy") { - Ok(s) => return Ok(Some(s.to_string())), - Err(..) => {} - } + if let Some(s) = config.get_string("http.proxy")? { + return Ok(Some(s.val)) + } + if let Ok(cfg) = git2::Config::open_default() { + if let Ok(s) = cfg.get_str("http.proxy") { + return Ok(Some(s.to_string())) } - Err(..) => {} } Ok(None) } @@ -282,9 +277,8 @@ } pub fn http_timeout(config: &Config) -> CargoResult> { - match config.get_i64("http.timeout")? 
{ - Some(s) => return Ok(Some(s.val)), - None => {} + if let Some(s) = config.get_i64("http.timeout")? { + return Ok(Some(s.val)) } Ok(env::var("HTTP_TIMEOUT").ok().and_then(|s| s.parse().ok())) } @@ -293,11 +287,8 @@ let RegistryConfig { index, token: _ } = registry_configuration(config)?; let mut map = HashMap::new(); let p = config.cwd().to_path_buf(); - match index { - Some(index) => { - map.insert("index".to_string(), ConfigValue::String(index, p.clone())); - } - None => {} + if let Some(index) = index { + map.insert("index".to_string(), ConfigValue::String(index, p.clone())); } map.insert("token".to_string(), ConfigValue::String(token, p)); @@ -327,28 +318,22 @@ let (mut registry, _) = registry(config, opts.token.clone(), opts.index.clone())?; - match opts.to_add { - Some(ref v) => { - let v = v.iter().map(|s| &s[..]).collect::>(); - config.shell().status("Owner", format!("adding {:?} to crate {}", - v, name))?; - registry.add_owners(&name, &v).map_err(|e| { - human(format!("failed to add owners to crate {}: {}", name, e)) - })?; - } - None => {} + if let Some(ref v) = opts.to_add { + let v = v.iter().map(|s| &s[..]).collect::>(); + config.shell().status("Owner", format!("adding {:?} to crate {}", + v, name))?; + registry.add_owners(&name, &v).map_err(|e| { + human(format!("failed to add owners to crate {}: {}", name, e)) + })?; } - match opts.to_remove { - Some(ref v) => { - let v = v.iter().map(|s| &s[..]).collect::>(); - config.shell().status("Owner", format!("removing {:?} from crate {}", - v, name))?; - registry.remove_owners(&name, &v).map_err(|e| { - human(format!("failed to remove owners from crate {}: {}", name, e)) - })?; - } - None => {} + if let Some(ref v) = opts.to_remove { + let v = v.iter().map(|s| &s[..]).collect::>(); + config.shell().status("Owner", format!("removing {:?} from crate {}", + v, name))?; + registry.remove_owners(&name, &v).map_err(|e| { + human(format!("failed to remove owners from crate {}: {}", name, e)) + })?; } if 
opts.list { @@ -424,7 +409,7 @@ let list_items = crates.iter() .map(|krate| ( - format!("{} ({})", krate.name, krate.max_version), + format!("{} = \"{}\"", krate.name, krate.max_version), krate.description.as_ref().map(|desc| truncate_with_ellipsis(&desc.replace("\n", " "), 128)) )) @@ -439,7 +424,7 @@ Some(desc) => { let space = repeat(' ').take(description_margin - name.len()) .collect::(); - name + &space + &desc + name + &space + "# " + &desc } None => name }; diff -Nru cargo-0.17.0/src/cargo/ops/resolve.rs cargo-0.19.0/src/cargo/ops/resolve.rs --- cargo-0.17.0/src/cargo/ops/resolve.rs 2017-03-03 19:21:26.000000000 +0000 +++ cargo-0.19.0/src/cargo/ops/resolve.rs 2017-05-16 03:23:10.000000000 +0000 @@ -37,16 +37,30 @@ registry.add_preloaded(source); } - // First, resolve the root_package's *listed* dependencies, as well as - // downloading and updating all remotes and such. - let resolve = resolve_with_registry(ws, &mut registry)?; - - // Second, resolve with precisely what we're doing. Filter out - // transitive dependencies if necessary, specify features, handle - // overrides, etc. - let _p = profile::start("resolving w/ overrides..."); + let resolve = if ws.require_optional_deps() { + // First, resolve the root_package's *listed* dependencies, as well as + // downloading and updating all remotes and such. + let resolve = resolve_with_registry(ws, &mut registry)?; + + // Second, resolve with precisely what we're doing. Filter out + // transitive dependencies if necessary, specify features, handle + // overrides, etc. + let _p = profile::start("resolving w/ overrides..."); + + add_overrides(&mut registry, ws)?; + + for &(ref replace_spec, ref dep) in ws.root_replace() { + if !resolve.iter().any(|r| replace_spec.matches(r) && !dep.matches_id(r)) { + ws.config().shell().warn( + format!("package replacement is not used: {}", replace_spec) + )? 
+ } + } - add_overrides(&mut registry, ws)?; + Some(resolve) + } else { + None + }; let method = if all_features { Method::Everything @@ -60,16 +74,8 @@ let resolved_with_overrides = ops::resolve_with_previous(&mut registry, ws, - method, Some(&resolve), None, - &specs)?; - - for &(ref replace_spec, _) in ws.root_replace() { - if !resolved_with_overrides.replacements().keys().any(|r| replace_spec.matches(r)) { - ws.config().shell().warn( - format!("package replacement is not used: {}", replace_spec) - )? - } - } + method, resolve.as_ref(), None, + specs)?; let packages = get_resolved_packages(&resolved_with_overrides, registry); @@ -159,8 +165,7 @@ // members in the workspace, so propagate the `Method::Everything`. Method::Everything => Method::Everything, - // If we're not resolving everything though then the workspace is - // already resolved and now we're drilling down from that to the + // If we're not resolving everything though then we're constructing the // exact crate graph we're going to build. Here we don't necessarily // want to keep around all workspace crates as they may not all be // built/tested. @@ -176,7 +181,6 @@ // base method with no features specified but using default features // for any other packages specified with `-p`. Method::Required { dev_deps, .. 
} => { - assert!(previous.is_some()); let base = Method::Required { dev_deps: dev_deps, features: &[], diff -Nru cargo-0.17.0/src/cargo/sources/directory.rs cargo-0.19.0/src/cargo/sources/directory.rs --- cargo-0.17.0/src/cargo/sources/directory.rs 2017-03-03 19:21:26.000000000 +0000 +++ cargo-0.19.0/src/cargo/sources/directory.rs 2017-05-16 03:23:10.000000000 +0000 @@ -5,7 +5,7 @@ use std::path::{Path, PathBuf}; use rustc_serialize::hex::ToHex; -use rustc_serialize::json; +use serde_json; use core::{Package, PackageId, Summary, SourceId, Source, Dependency, Registry}; use sources::PathSource; @@ -19,7 +19,7 @@ config: &'cfg Config, } -#[derive(RustcDecodable)] +#[derive(Deserialize)] struct Checksum { package: String, files: HashMap, @@ -76,7 +76,7 @@ // crates and otherwise may conflict with a VCS // (rust-lang/cargo#3414). if let Some(s) = path.file_name().and_then(|s| s.to_str()) { - if s.starts_with(".") { + if s.starts_with('.') { continue } } @@ -93,7 +93,7 @@ pkg.package_id().version())) })?; - let cksum: Checksum = json::decode(&cksum).chain_error(|| { + let cksum: Checksum = serde_json::from_str(&cksum).chain_error(|| { human(format!("failed to decode `.cargo-checksum.json` of \ {} v{}", pkg.package_id().name(), diff -Nru cargo-0.17.0/src/cargo/sources/git/source.rs cargo-0.19.0/src/cargo/sources/git/source.rs --- cargo-0.17.0/src/cargo/sources/git/source.rs 2017-03-03 19:21:26.000000000 +0000 +++ cargo-0.19.0/src/cargo/sources/git/source.rs 2017-05-16 03:23:10.000000000 +0000 @@ -146,7 +146,7 @@ trace!("updating git source `{:?}`", self.remote); - let repo = self.remote.checkout(&db_path, &self.config)?; + let repo = self.remote.checkout(&db_path, self.config)?; let rev = repo.rev_for(&self.reference)?; (repo, rev) } else { @@ -166,7 +166,7 @@ // in scope so the destructors here won't tamper with too much. // Checkout is immutable, so we don't need to protect it with a lock once // it is created. 
- repo.copy_to(actual_rev.clone(), &checkout_path, &self.config)?; + repo.copy_to(actual_rev.clone(), &checkout_path, self.config)?; let source_id = self.source_id.with_precise(Some(actual_rev.to_string())); let path_source = PathSource::new_recursive(&checkout_path, diff -Nru cargo-0.17.0/src/cargo/sources/git/utils.rs cargo-0.19.0/src/cargo/sources/git/utils.rs --- cargo-0.17.0/src/cargo/sources/git/utils.rs 2017-03-03 19:21:26.000000000 +0000 +++ cargo-0.19.0/src/cargo/sources/git/utils.rs 2017-05-16 03:23:10.000000000 +0000 @@ -3,9 +3,9 @@ use std::fs::{self, File}; use std::path::{Path, PathBuf}; -use rustc_serialize::{Encodable, Encoder}; -use url::Url; use git2::{self, ObjectType}; +use serde::ser::{self, Serialize}; +use url::Url; use core::GitReference; use util::{CargoResult, ChainError, human, ToUrl, internal, Config, network}; @@ -13,6 +13,19 @@ #[derive(PartialEq, Clone, Debug)] pub struct GitRevision(git2::Oid); +impl ser::Serialize for GitRevision { + fn serialize(&self, s: S) -> Result { + serialize_str(self, s) + } +} + +fn serialize_str(t: &T, s: S) -> Result + where T: fmt::Display, + S: ser::Serializer, +{ + t.to_string().serialize(s) +} + impl fmt::Display for GitRevision { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { fmt::Display::fmt(&self.0, f) @@ -29,77 +42,34 @@ /// GitRemote represents a remote repository. It gets cloned into a local /// GitDatabase. -#[derive(PartialEq,Clone,Debug)] +#[derive(PartialEq, Clone, Debug, Serialize)] pub struct GitRemote { + #[serde(serialize_with = "serialize_str")] url: Url, } -#[derive(PartialEq,Clone,RustcEncodable)] -struct EncodableGitRemote { - url: String, -} - -impl Encodable for GitRemote { - fn encode(&self, s: &mut S) -> Result<(), S::Error> { - EncodableGitRemote { - url: self.url.to_string() - }.encode(s) - } -} - /// GitDatabase is a local clone of a remote repository's database. Multiple /// GitCheckouts can be cloned from this GitDatabase. 
+#[derive(Serialize)] pub struct GitDatabase { remote: GitRemote, path: PathBuf, + #[serde(skip_serializing)] repo: git2::Repository, } -#[derive(RustcEncodable)] -pub struct EncodableGitDatabase { - remote: GitRemote, - path: String, -} - -impl Encodable for GitDatabase { - fn encode(&self, s: &mut S) -> Result<(), S::Error> { - EncodableGitDatabase { - remote: self.remote.clone(), - path: self.path.display().to_string() - }.encode(s) - } -} - /// GitCheckout is a local checkout of a particular revision. Calling /// `clone_into` with a reference will resolve the reference into a revision, /// and return a CargoError if no revision for that reference was found. +#[derive(Serialize)] pub struct GitCheckout<'a> { database: &'a GitDatabase, location: PathBuf, revision: GitRevision, + #[serde(skip_serializing)] repo: git2::Repository, } -#[derive(RustcEncodable)] -pub struct EncodableGitCheckout { - database: EncodableGitDatabase, - location: String, - revision: String, -} - -impl<'a> Encodable for GitCheckout<'a> { - fn encode(&self, s: &mut S) -> Result<(), S::Error> { - EncodableGitCheckout { - location: self.location.display().to_string(), - revision: self.revision.to_string(), - database: EncodableGitDatabase { - remote: self.database.remote.clone(), - path: self.database.path.display().to_string(), - }, - }.encode(s) - } -} - // Implementations impl GitRemote { @@ -120,13 +90,13 @@ pub fn checkout(&self, into: &Path, cargo_config: &Config) -> CargoResult { let repo = match git2::Repository::open(into) { Ok(repo) => { - self.fetch_into(&repo, &cargo_config).chain_error(|| { + self.fetch_into(&repo, cargo_config).chain_error(|| { human(format!("failed to fetch into {}", into.display())) })?; repo } Err(..) => { - self.clone_into(into, &cargo_config).chain_error(|| { + self.clone_into(into, cargo_config).chain_error(|| { human(format!("failed to clone into: {}", into.display())) })? 
} @@ -152,7 +122,7 @@ // Create a local anonymous remote in the repository to fetch the url let url = self.url.to_string(); let refspec = "refs/heads/*:refs/heads/*"; - fetch(dst, &url, refspec, &cargo_config) + fetch(dst, &url, refspec, cargo_config) } fn clone_into(&self, dst: &Path, cargo_config: &Config) -> CargoResult { @@ -162,7 +132,7 @@ } fs::create_dir_all(dst)?; let repo = git2::Repository::init_bare(dst)?; - fetch(&repo, &url, "refs/heads/*:refs/heads/*", &cargo_config)?; + fetch(&repo, &url, "refs/heads/*:refs/heads/*", cargo_config)?; Ok(repo) } } @@ -178,7 +148,7 @@ Ok(repo) => { let checkout = GitCheckout::new(dest, self, rev, repo); if !checkout.is_fresh() { - checkout.fetch(&cargo_config)?; + checkout.fetch(cargo_config)?; checkout.reset()?; assert!(checkout.is_fresh()); } @@ -186,9 +156,7 @@ } Err(..) => GitCheckout::clone_into(dest, self, rev)?, }; - checkout.update_submodules(&cargo_config).chain_error(|| { - internal("failed to update submodules") - })?; + checkout.update_submodules(cargo_config)?; Ok(checkout) } @@ -217,7 +185,10 @@ } GitReference::Rev(ref s) => { let obj = self.repo.revparse_single(s)?; - obj.id() + match obj.as_tag() { + Some(tag) => tag.target_id(), + None => obj.id(), + } } }; Ok(GitRevision(id)) @@ -294,7 +265,7 @@ let url = self.database.path.to_url()?; let url = url.to_string(); let refspec = "refs/heads/*:refs/heads/*"; - fetch(&self.repo, &url, refspec, &cargo_config)?; + fetch(&self.repo, &url, refspec, cargo_config)?; Ok(()) } @@ -317,58 +288,67 @@ } fn update_submodules(&self, cargo_config: &Config) -> CargoResult<()> { - return update_submodules(&self.repo, &cargo_config); + return update_submodules(&self.repo, cargo_config); fn update_submodules(repo: &git2::Repository, cargo_config: &Config) -> CargoResult<()> { info!("update submodules for: {:?}", repo.workdir().unwrap()); for mut child in repo.submodules()?.into_iter() { - child.init(false)?; - let url = child.url().chain_error(|| { - internal("non-utf8 url 
for submodule") + update_submodule(repo, &mut child, cargo_config).chain_error(|| { + human(format!("failed to update submodule `{}`", + child.name().unwrap_or(""))) })?; + } + Ok(()) + } - // A submodule which is listed in .gitmodules but not actually - // checked out will not have a head id, so we should ignore it. - let head = match child.head_id() { - Some(head) => head, - None => continue, - }; - - // If the submodule hasn't been checked out yet, we need to - // clone it. If it has been checked out and the head is the same - // as the submodule's head, then we can bail out and go to the - // next submodule. - let head_and_repo = child.open().and_then(|repo| { - let target = repo.head()?.target(); - Ok((target, repo)) - }); - let repo = match head_and_repo { - Ok((head, repo)) => { - if child.head_id() == head { - continue - } - repo - } - Err(..) => { - let path = repo.workdir().unwrap().join(child.path()); - let _ = fs::remove_dir_all(&path); - git2::Repository::clone(url, &path)? + fn update_submodule(parent: &git2::Repository, + child: &mut git2::Submodule, + cargo_config: &Config) -> CargoResult<()> { + child.init(false)?; + let url = child.url().chain_error(|| { + internal("non-utf8 url for submodule") + })?; + + // A submodule which is listed in .gitmodules but not actually + // checked out will not have a head id, so we should ignore it. + let head = match child.head_id() { + Some(head) => head, + None => return Ok(()), + }; + + // If the submodule hasn't been checked out yet, we need to + // clone it. If it has been checked out and the head is the same + // as the submodule's head, then we can bail out and go to the + // next submodule. + let head_and_repo = child.open().and_then(|repo| { + let target = repo.head()?.target(); + Ok((target, repo)) + }); + let repo = match head_and_repo { + Ok((head, repo)) => { + if child.head_id() == head { + return Ok(()) } - }; + repo + } + Err(..) 
=> { + let path = parent.workdir().unwrap().join(child.path()); + let _ = fs::remove_dir_all(&path); + git2::Repository::clone(url, &path)? + } + }; - // Fetch data from origin and reset to the head commit - let refspec = "refs/heads/*:refs/heads/*"; - fetch(&repo, url, refspec, &cargo_config).chain_error(|| { - internal(format!("failed to fetch submodule `{}` from {}", - child.name().unwrap_or(""), url)) - })?; + // Fetch data from origin and reset to the head commit + let refspec = "refs/heads/*:refs/heads/*"; + fetch(&repo, url, refspec, cargo_config).chain_error(|| { + internal(format!("failed to fetch submodule `{}` from {}", + child.name().unwrap_or(""), url)) + })?; - let obj = repo.find_object(head, None)?; - repo.reset(&obj, git2::ResetType::Hard, None)?; - update_submodules(&repo, &cargo_config)?; - } - Ok(()) + let obj = repo.find_object(head, None)?; + repo.reset(&obj, git2::ResetType::Hard, None)?; + update_submodules(&repo, cargo_config) } } } @@ -456,7 +436,7 @@ let username = username.unwrap(); debug_assert!(!ssh_username_requested); ssh_agent_attempts.push(username.to_string()); - return git2::Cred::ssh_key_from_agent(&username) + return git2::Cred::ssh_key_from_agent(username) } // Sometimes libgit2 will ask for a username/password in plaintext. 
This @@ -551,7 +531,7 @@ res.chain_error(|| { let mut msg = "failed to authenticate when downloading \ repository".to_string(); - if ssh_agent_attempts.len() > 0 { + if !ssh_agent_attempts.is_empty() { let names = ssh_agent_attempts.iter() .map(|s| format!("`{}`", s)) .collect::>() @@ -587,12 +567,12 @@ cb.credentials(f); // Create a local anonymous remote in the repository to fetch the url - let mut remote = repo.remote_anonymous(&url)?; + let mut remote = repo.remote_anonymous(url)?; let mut opts = git2::FetchOptions::new(); opts.remote_callbacks(cb) .download_tags(git2::AutotagOption::All); - network::with_retry(config, ||{ + network::with_retry(config, || { remote.fetch(&[refspec], Some(&mut opts), None) })?; Ok(()) diff -Nru cargo-0.17.0/src/cargo/sources/path.rs cargo-0.19.0/src/cargo/sources/path.rs --- cargo-0.17.0/src/cargo/sources/path.rs 2017-03-03 19:21:26.000000000 +0000 +++ cargo-0.19.0/src/cargo/sources/path.rs 2017-05-16 03:23:10.000000000 +0000 @@ -107,10 +107,10 @@ .collect::, _>>()?; let mut filter = |p: &Path| { - let relative_path = util::without_prefix(p, &root).unwrap(); - include.iter().any(|p| p.matches_path(&relative_path)) || { + let relative_path = util::without_prefix(p, root).unwrap(); + include.iter().any(|p| p.matches_path(relative_path)) || { include.is_empty() && - !exclude.iter().any(|p| p.matches_path(&relative_path)) + !exclude.iter().any(|p| p.matches_path(relative_path)) } }; @@ -171,24 +171,24 @@ let index_files = index.iter().map(|entry| { use libgit2_sys::GIT_FILEMODE_COMMIT; let is_dir = entry.mode == GIT_FILEMODE_COMMIT as u32; - (join(&root, &entry.path), Some(is_dir)) + (join(root, &entry.path), Some(is_dir)) }); let mut opts = git2::StatusOptions::new(); opts.include_untracked(true); - if let Some(suffix) = util::without_prefix(pkg_path, &root) { + if let Some(suffix) = util::without_prefix(pkg_path, root) { opts.pathspec(suffix); } let statuses = repo.statuses(Some(&mut opts))?; let untracked = 
statuses.iter().filter_map(|entry| { match entry.status() { - git2::STATUS_WT_NEW => Some((join(&root, entry.path_bytes()), None)), + git2::STATUS_WT_NEW => Some((join(root, entry.path_bytes()), None)), _ => None } }); let mut subpackages_found = Vec::new(); - 'outer: for (file_path, is_dir) in index_files.chain(untracked) { + for (file_path, is_dir) in index_files.chain(untracked) { let file_path = file_path?; // Filter out files blatantly outside this package. This is helped a @@ -229,7 +229,7 @@ if is_dir.unwrap_or_else(|| file_path.is_dir()) { warn!(" found submodule {}", file_path.display()); - let rel = util::without_prefix(&file_path, &root).unwrap(); + let rel = util::without_prefix(&file_path, root).unwrap(); let rel = rel.to_str().chain_error(|| { human(format!("invalid utf-8 filename: {}", rel.display())) })?; diff -Nru cargo-0.17.0/src/cargo/sources/registry/index.rs cargo-0.19.0/src/cargo/sources/registry/index.rs --- cargo-0.17.0/src/cargo/sources/registry/index.rs 2017-03-03 19:21:26.000000000 +0000 +++ cargo-0.19.0/src/cargo/sources/registry/index.rs 2017-05-16 03:23:10.000000000 +0000 @@ -3,7 +3,7 @@ use std::fs::File; use std::path::Path; -use rustc_serialize::json; +use serde_json; use core::dependency::{Dependency, DependencyInner, Kind}; use core::{SourceId, Summary, PackageId, Registry}; @@ -53,14 +53,14 @@ /// specified. 
pub fn summaries(&mut self, name: &str) -> CargoResult<&Vec<(Summary, bool)>> { if self.cache.contains_key(name) { - return Ok(self.cache.get(name).unwrap()); + return Ok(&self.cache[name]); } let summaries = self.load_summaries(name)?; let summaries = summaries.into_iter().filter(|summary| { summary.0.package_id().name() == name }).collect(); self.cache.insert(name.to_string(), summaries); - Ok(self.cache.get(name).unwrap()) + Ok(&self.cache[name]) } fn load_summaries(&mut self, name: &str) -> CargoResult> { @@ -96,7 +96,7 @@ let mut contents = String::new(); f.read_to_string(&mut contents)?; let ret: CargoResult>; - ret = contents.lines().filter(|l| l.trim().len() > 0) + ret = contents.lines().filter(|l| !l.trim().is_empty()) .map(|l| self.parse_registry_package(l)) .collect(); ret.chain_error(|| { @@ -116,7 +116,7 @@ -> CargoResult<(Summary, bool)> { let RegistryPackage { name, vers, cksum, deps, features, yanked - } = json::decode::(line)?; + } = serde_json::from_str::(line)?; let pkgid = PackageId::new(&name, &vers, &self.source_id)?; let deps: CargoResult> = deps.into_iter().map(|dep| { self.parse_registry_dependency(dep) diff -Nru cargo-0.17.0/src/cargo/sources/registry/mod.rs cargo-0.19.0/src/cargo/sources/registry/mod.rs --- cargo-0.17.0/src/cargo/sources/registry/mod.rs 2017-03-03 19:21:26.000000000 +0000 +++ cargo-0.19.0/src/cargo/sources/registry/mod.rs 2017-05-16 03:23:10.000000000 +0000 @@ -184,7 +184,7 @@ index_locked: bool, } -#[derive(RustcDecodable)] +#[derive(Deserialize)] pub struct RegistryConfig { /// Download endpoint for all crates. 
This will be appended with /// `///download` and then will be hit with an HTTP GET @@ -196,7 +196,7 @@ pub api: String, } -#[derive(RustcDecodable)] +#[derive(Deserialize)] struct RegistryPackage { name: String, vers: String, @@ -206,7 +206,7 @@ yanked: Option, } -#[derive(RustcDecodable)] +#[derive(Deserialize)] struct RegistryDependency { name: String, req: String, diff -Nru cargo-0.17.0/src/cargo/sources/registry/remote.rs cargo-0.19.0/src/cargo/sources/registry/remote.rs --- cargo-0.17.0/src/cargo/sources/registry/remote.rs 2017-03-03 19:21:26.000000000 +0000 +++ cargo-0.19.0/src/cargo/sources/registry/remote.rs 2017-05-16 03:23:10.000000000 +0000 @@ -5,7 +5,7 @@ use curl::easy::{Easy, List}; use git2; use rustc_serialize::hex::ToHex; -use rustc_serialize::json; +use serde_json; use url::Url; use core::{PackageId, SourceId}; @@ -49,7 +49,7 @@ "the registry index")?; let path = lock.path().parent().unwrap(); let contents = paths::read(&path.join("config.json"))?; - let config = json::decode(&contents)?; + let config = serde_json::from_str(&contents)?; Ok(Some(config)) } @@ -88,7 +88,7 @@ }; debug!("attempting github fast path for {}", self.source_id.url()); - if github_up_to_date(handle, &self.source_id.url(), &oid) { + if github_up_to_date(handle, self.source_id.url(), &oid) { return Ok(()) } debug!("fast path failed, falling back to a git fetch"); @@ -99,7 +99,7 @@ let url = self.source_id.url().to_string(); let refspec = "refs/heads/*:refs/remotes/origin/*"; - git::fetch(&repo, &url, refspec, &self.config).chain_error(|| { + git::fetch(&repo, &url, refspec, self.config).chain_error(|| { human(format!("failed to fetch `{}`", url)) })?; diff -Nru cargo-0.17.0/src/cargo/sources/replaced.rs cargo-0.19.0/src/cargo/sources/replaced.rs --- cargo-0.17.0/src/cargo/sources/replaced.rs 2017-03-03 19:21:26.000000000 +0000 +++ cargo-0.19.0/src/cargo/sources/replaced.rs 2017-05-16 03:23:10.000000000 +0000 @@ -54,7 +54,7 @@ } fn fingerprint(&self, id: &Package) -> 
CargoResult { - self.inner.fingerprint(&id) + self.inner.fingerprint(id) } fn verify(&self, id: &PackageId) -> CargoResult<()> { diff -Nru cargo-0.17.0/src/cargo/util/config.rs cargo-0.19.0/src/cargo/util/config.rs --- cargo-0.17.0/src/cargo/util/config.rs 2017-03-03 19:21:26.000000000 +0000 +++ cargo-0.19.0/src/cargo/util/config.rs 2017-05-16 03:23:10.000000000 +0000 @@ -17,6 +17,7 @@ use core::MultiShell; use util::{CargoResult, CargoError, ChainError, Rustc, internal, human}; use util::{Filesystem, LazyCell}; +use util::paths; use util::toml as cargo_toml; @@ -28,6 +29,7 @@ rustc: LazyCell, values: LazyCell>, cwd: PathBuf, + cargo_exe: LazyCell, rustdoc: LazyCell, extra_verbose: Cell, frozen: Cell, @@ -44,6 +46,7 @@ rustc: LazyCell::new(), cwd: cwd, values: LazyCell::new(), + cargo_exe: LazyCell::new(), rustdoc: LazyCell::new(), extra_verbose: Cell::new(false), frozen: Cell::new(false), @@ -90,13 +93,33 @@ } pub fn rustc(&self) -> CargoResult<&Rustc> { - self.rustc.get_or_try_init(|| Rustc::new(self.get_tool("rustc")?)) + self.rustc.get_or_try_init(|| Rustc::new(self.get_tool("rustc")?, + self.maybe_get_tool("rustc_wrapper")?)) + } + + pub fn cargo_exe(&self) -> CargoResult<&Path> { + self.cargo_exe.get_or_try_init(|| + env::current_exe().and_then(|path| path.canonicalize()) + .chain_error(|| { + human("couldn't get the path to cargo executable") + }) + ).map(AsRef::as_ref) } pub fn values(&self) -> CargoResult<&HashMap> { self.values.get_or_try_init(|| self.load_values()) } + pub fn set_values(&self, values: HashMap) -> CargoResult<()> { + if self.values.borrow().is_some() { + return Err(human("Config values already found")); + } + match self.values.fill(values) { + Ok(()) => Ok(()), + Err(_) => Err(human("Could not fill values")), + } + } + pub fn cwd(&self) -> &Path { &self.cwd } pub fn target_dir(&self) -> CargoResult> { @@ -193,7 +216,7 @@ } pub fn get_path(&self, key: &str) -> CargoResult>> { - if let Some(val) = self.get_string(&key)? 
{ + if let Some(val) = self.get_string(key)? { let is_path = val.val.contains('/') || (cfg!(windows) && val.val.contains('\\')); let path = if is_path { @@ -366,19 +389,18 @@ !self.frozen.get() && !self.locked.get() } - fn load_values(&self) -> CargoResult> { + pub fn load_values(&self) -> CargoResult> { let mut cfg = CV::Table(HashMap::new(), PathBuf::from(".")); walk_tree(&self.cwd, |mut file, path| { let mut contents = String::new(); file.read_to_string(&mut contents)?; - let table = cargo_toml::parse(&contents, - &path, - self).chain_error(|| { + let toml = cargo_toml::parse(&contents, + &path, + self).chain_error(|| { human(format!("could not parse TOML configuration in `{}`", path.display())) })?; - let toml = toml::Value::Table(table); let value = CV::from_toml(&path, toml).chain_error(|| { human(format!("failed to load TOML configuration from `{}`", path.display())) @@ -394,28 +416,37 @@ } } - fn get_tool(&self, tool: &str) -> CargoResult { + /// Look for a path for `tool` in an environment variable or config path, but return `None` + /// if it's not present. + fn maybe_get_tool(&self, tool: &str) -> CargoResult> { let var = tool.chars().flat_map(|c| c.to_uppercase()).collect::(); if let Some(tool_path) = env::var_os(&var) { - return Ok(PathBuf::from(tool_path)); + return Ok(Some(PathBuf::from(tool_path))); } let var = format!("build.{}", tool); if let Some(tool_path) = self.get_path(&var)? { - return Ok(tool_path.val); + return Ok(Some(tool_path.val)); } - Ok(PathBuf::from(tool)) + Ok(None) + } + + /// Look for a path for `tool` in an environment variable or config path, defaulting to `tool` + /// as a path. 
+ fn get_tool(&self, tool: &str) -> CargoResult { + self.maybe_get_tool(tool) + .map(|t| t.unwrap_or(PathBuf::from(tool))) } } -#[derive(Eq, PartialEq, Clone, RustcEncodable, RustcDecodable, Copy)] +#[derive(Eq, PartialEq, Clone, Copy)] pub enum Location { Project, Global } -#[derive(Eq,PartialEq,Clone,RustcDecodable)] +#[derive(Eq,PartialEq,Clone,Deserialize)] pub enum ConfigValue { Integer(i64, PathBuf), String(String, PathBuf), @@ -688,10 +719,9 @@ fn walk_tree(pwd: &Path, mut walk: F) -> CargoResult<()> where F: FnMut(File, &Path) -> CargoResult<()> { - let mut current = pwd; let mut stash: HashSet = HashSet::new(); - loop { + for current in paths::ancestors(pwd) { let possible = current.join(".cargo").join("config"); if fs::metadata(&possible).is_ok() { let file = File::open(&possible)?; @@ -700,11 +730,6 @@ stash.insert(possible); } - - match current.parent() { - Some(p) => current = p, - None => break, - } } // Once we're done, also be sure to walk the home directory even if it's not @@ -743,9 +768,11 @@ let mut contents = String::new(); let _ = file.read_to_string(&mut contents); let mut toml = cargo_toml::parse(&contents, file.path(), cfg)?; - toml.insert(key.to_string(), value.into_toml()); + toml.as_table_mut() + .unwrap() + .insert(key.to_string(), value.into_toml()); - let contents = toml::Value::Table(toml).to_string(); + let contents = toml.to_string(); file.seek(SeekFrom::Start(0))?; file.write_all(contents.as_bytes())?; file.file().set_len(contents.len() as u64)?; diff -Nru cargo-0.17.0/src/cargo/util/dependency_queue.rs cargo-0.19.0/src/cargo/util/dependency_queue.rs --- cargo-0.17.0/src/cargo/util/dependency_queue.rs 2017-03-03 19:21:26.000000000 +0000 +++ cargo-0.19.0/src/cargo/util/dependency_queue.rs 2017-05-16 03:23:10.000000000 +0000 @@ -85,7 +85,7 @@ for dep in dependencies { assert!(my_dependencies.insert(dep.clone())); let rev = self.reverse_dep_map.entry(dep.clone()) - .or_insert(HashSet::new()); + .or_insert_with(HashSet::new); 
assert!(rev.insert(key.clone())); } &mut slot.insert((my_dependencies, value)).1 diff -Nru cargo-0.17.0/src/cargo/util/errors.rs cargo-0.19.0/src/cargo/util/errors.rs --- cargo-0.17.0/src/cargo/util/errors.rs 2017-03-03 19:21:26.000000000 +0000 +++ cargo-0.19.0/src/cargo/util/errors.rs 2017-05-16 03:23:10.000000000 +0000 @@ -7,10 +7,12 @@ use std::str; use std::string; +use core::TargetKind; + use curl; use git2; -use rustc_serialize::json; use semver; +use serde_json; use term; use toml; use url; @@ -137,13 +139,20 @@ /// Error when testcases fail pub struct CargoTestError { + pub test: Test, pub desc: String, pub exit: Option, pub causes: Vec, } +pub enum Test { + Multiple, + Doc, + UnitTest(TargetKind, String) +} + impl CargoTestError { - pub fn new(errors: Vec) -> Self { + pub fn new(test: Test, errors: Vec) -> Self { if errors.is_empty() { panic!("Cannot create CargoTestError from empty Vec") } @@ -151,11 +160,30 @@ .collect::>() .join("\n"); CargoTestError { + test: test, desc: desc, exit: errors[0].exit, causes: errors, } } + + pub fn hint(&self) -> String { + match &self.test { + &Test::UnitTest(ref kind, ref name) => { + match *kind { + TargetKind::Bench => format!("test failed, to rerun pass '--bench {}'", name), + TargetKind::Bin => format!("test failed, to rerun pass '--bin {}'", name), + TargetKind::Lib(_) => "test failed, to rerun pass '--lib'".into(), + TargetKind::Test => format!("test failed, to rerun pass '--test {}'", name), + TargetKind::ExampleBin | TargetKind::ExampleLib(_) => + format!("test failed, to rerun pass '--example {}", name), + _ => "test failed.".into() + } + }, + &Test::Doc => "test failed, to rerun pass '--doc'".into(), + _ => "test failed.".into() + } + } } impl fmt::Display for CargoTestError { @@ -332,13 +360,12 @@ io::Error, ProcessError, git2::Error, - json::DecoderError, - json::EncoderError, + serde_json::Error, curl::Error, CliError, - toml::Error, url::ParseError, - toml::DecodeError, + toml::ser::Error, + 
toml::de::Error, ffi::NulError, term::Error, num::ParseIntError, @@ -358,14 +385,17 @@ impl CargoError for semver::ReqParseError {} impl CargoError for io::Error {} impl CargoError for git2::Error {} -impl CargoError for json::DecoderError {} -impl CargoError for json::EncoderError {} +impl CargoError for serde_json::Error {} impl CargoError for curl::Error {} impl CargoError for ProcessError {} impl CargoError for CargoTestError {} impl CargoError for CliError {} -impl CargoError for toml::Error {} -impl CargoError for toml::DecodeError {} +impl CargoError for toml::ser::Error { + fn is_human(&self) -> bool { true } +} +impl CargoError for toml::de::Error { + fn is_human(&self) -> bool { true } +} impl CargoError for url::ParseError {} impl CargoError for ffi::NulError {} impl CargoError for term::Error {} @@ -388,14 +418,14 @@ if let Some(out) = output { match str::from_utf8(&out.stdout) { - Ok(s) if s.trim().len() > 0 => { + Ok(s) if !s.trim().is_empty() => { desc.push_str("\n--- stdout\n"); desc.push_str(s); } Ok(..) | Err(..) 
=> {} } match str::from_utf8(&out.stderr) { - Ok(s) if s.trim().len() > 0 => { + Ok(s) if !s.trim().is_empty() => { desc.push_str("\n--- stderr\n"); desc.push_str(s); } diff -Nru cargo-0.17.0/src/cargo/util/flock.rs cargo-0.19.0/src/cargo/util/flock.rs --- cargo-0.17.0/src/cargo/util/flock.rs 2017-03-03 19:21:26.000000000 +0000 +++ cargo-0.19.0/src/cargo/util/flock.rs 2017-05-16 03:23:10.000000000 +0000 @@ -276,6 +276,9 @@ #[cfg(target_os = "macos")] Err(ref e) if e.raw_os_error() == Some(libc::ENOTSUP) => return Ok(()), + #[cfg(target_os = "linux")] + Err(ref e) if e.raw_os_error() == Some(libc::ENOSYS) => return Ok(()), + Err(e) => { if e.raw_os_error() != lock_contended_error().raw_os_error() { return Err(human(e)).chain_error(|| { diff -Nru cargo-0.17.0/src/cargo/util/important_paths.rs cargo-0.19.0/src/cargo/util/important_paths.rs --- cargo-0.17.0/src/cargo/util/important_paths.rs 2017-03-03 19:21:26.000000000 +0000 +++ cargo-0.19.0/src/cargo/util/important_paths.rs 2017-05-16 03:23:10.000000000 +0000 @@ -1,6 +1,7 @@ use std::fs; use std::path::{Path, PathBuf}; use util::{CargoResult, human}; +use util::paths; /// Iteratively search for `file` in `pwd` and its parents, returning /// the path of the directory. 
@@ -38,7 +39,7 @@ -> CargoResult { match manifest_path { Some(path) => { - let absolute_path = cwd.join(&path); + let absolute_path = paths::normalize_path(&cwd.join(&path)); if !absolute_path.ends_with("Cargo.toml") { bail!("the manifest-path must be a path to a Cargo.toml file") } @@ -47,7 +48,7 @@ } Ok(absolute_path) }, - None => find_project_manifest(&cwd, "Cargo.toml"), + None => find_project_manifest(cwd, "Cargo.toml"), } } diff -Nru cargo-0.17.0/src/cargo/util/lazy_cell.rs cargo-0.19.0/src/cargo/util/lazy_cell.rs --- cargo-0.17.0/src/cargo/util/lazy_cell.rs 2017-03-03 19:21:26.000000000 +0000 +++ cargo-0.19.0/src/cargo/util/lazy_cell.rs 2017-05-16 03:23:10.000000000 +0000 @@ -58,7 +58,7 @@ where F: FnOnce() -> Result { if self.borrow().is_none() { - if let Err(_) = self.fill(init()?) { + if self.fill(init()?).is_err() { unreachable!(); } } diff -Nru cargo-0.17.0/src/cargo/util/machine_message.rs cargo-0.19.0/src/cargo/util/machine_message.rs --- cargo-0.17.0/src/cargo/util/machine_message.rs 2017-03-03 19:21:26.000000000 +0000 +++ cargo-0.19.0/src/cargo/util/machine_message.rs 2017-05-16 03:23:10.000000000 +0000 @@ -1,27 +1,23 @@ -use rustc_serialize::Encodable; -use rustc_serialize::json::{self, Json}; +use serde::ser; +use serde_json::{self, Value}; use core::{PackageId, Target, Profile}; -pub trait Message: Encodable { +pub trait Message: ser::Serialize { fn reason(&self) -> &str; } pub fn emit(t: T) { - let json = json::encode(&t).unwrap(); - let mut map = match json.parse().unwrap() { - Json::Object(obj) => obj, - _ => panic!("not a json object"), - }; - map.insert("reason".to_string(), Json::String(t.reason().to_string())); - println!("{}", Json::Object(map)); + let mut json: Value = serde_json::to_value(&t).unwrap(); + json["reason"] = json!(t.reason()); + println!("{}", json); } -#[derive(RustcEncodable)] +#[derive(Serialize)] pub struct FromCompiler<'a> { pub package_id: &'a PackageId, pub target: &'a Target, - pub message: json::Json, + pub 
message: serde_json::Value, } impl<'a> Message for FromCompiler<'a> { @@ -30,13 +26,14 @@ } } -#[derive(RustcEncodable)] +#[derive(Serialize)] pub struct Artifact<'a> { pub package_id: &'a PackageId, pub target: &'a Target, pub profile: &'a Profile, pub features: Vec, pub filenames: Vec, + pub fresh: bool, } impl<'a> Message for Artifact<'a> { @@ -45,7 +42,7 @@ } } -#[derive(RustcEncodable)] +#[derive(Serialize)] pub struct BuildScript<'a> { pub package_id: &'a PackageId, pub linked_libs: &'a [String], diff -Nru cargo-0.17.0/src/cargo/util/mod.rs cargo-0.19.0/src/cargo/util/mod.rs --- cargo-0.17.0/src/cargo/util/mod.rs 2017-03-03 19:21:26.000000000 +0000 +++ cargo-0.19.0/src/cargo/util/mod.rs 2017-05-16 03:23:10.000000000 +0000 @@ -1,7 +1,7 @@ pub use self::cfg::{Cfg, CfgExpr}; -pub use self::config::{Config, homedir}; +pub use self::config::{Config, ConfigValue, homedir}; pub use self::dependency_queue::{DependencyQueue, Fresh, Dirty, Freshness}; -pub use self::errors::{CargoResult, CargoError, ChainError, CliResult}; +pub use self::errors::{CargoResult, CargoError, Test, ChainError, CliResult}; pub use self::errors::{CliError, ProcessError, CargoTestError}; pub use self::errors::{Human, caused_human}; pub use self::errors::{process_error, internal_error, internal, human}; @@ -17,7 +17,7 @@ pub use self::sha256::Sha256; pub use self::to_semver::ToSemver; pub use self::to_url::ToUrl; -pub use self::vcs::{GitRepo, HgRepo}; +pub use self::vcs::{GitRepo, HgRepo, PijulRepo}; pub use self::read2::read2; pub mod config; diff -Nru cargo-0.17.0/src/cargo/util/paths.rs cargo-0.19.0/src/cargo/util/paths.rs --- cargo-0.17.0/src/cargo/util/paths.rs 2017-03-03 19:21:26.000000000 +0000 +++ cargo-0.19.0/src/cargo/util/paths.rs 2017-05-16 03:23:10.000000000 +0000 @@ -142,3 +142,42 @@ Err(..) 
=> Err(human("invalid non-unicode path")), } } + +pub fn ancestors(path: &Path) -> PathAncestors { + PathAncestors::new(path) +} + +pub struct PathAncestors<'a> { + current: Option<&'a Path>, + stop_at: Option +} + +impl<'a> PathAncestors<'a> { + fn new(path: &Path) -> PathAncestors { + PathAncestors { + current: Some(path), + //HACK: avoid reading `~/.cargo/config` when testing Cargo itself. + stop_at: env::var("__CARGO_TEST_ROOT").ok().map(PathBuf::from), + } + } +} + +impl<'a> Iterator for PathAncestors<'a> { + type Item = &'a Path; + + fn next(&mut self) -> Option<&'a Path> { + if let Some(path) = self.current { + self.current = path.parent(); + + if let Some(ref stop_at) = self.stop_at { + if path == stop_at { + self.current = None; + } + } + + Some(path) + } else { + None + } + } +} diff -Nru cargo-0.17.0/src/cargo/util/read2.rs cargo-0.19.0/src/cargo/util/read2.rs --- cargo-0.17.0/src/cargo/util/read2.rs 2017-03-03 19:21:26.000000000 +0000 +++ cargo-0.19.0/src/cargo/util/read2.rs 2017-05-16 03:23:10.000000000 +0000 @@ -149,7 +149,7 @@ unsafe fn read(&mut self) -> io::Result<()> { let dst = slice_to_end(self.dst); - match self.pipe.read_overlapped(dst, &mut self.overlapped) { + match self.pipe.read_overlapped(dst, self.overlapped.raw()) { Ok(_) => Ok(()), Err(e) => { if e.raw_os_error() == Some(ERROR_BROKEN_PIPE as i32) { diff -Nru cargo-0.17.0/src/cargo/util/rustc.rs cargo-0.19.0/src/cargo/util/rustc.rs --- cargo-0.17.0/src/cargo/util/rustc.rs 2017-03-03 19:21:26.000000000 +0000 +++ cargo-0.19.0/src/cargo/util/rustc.rs 2017-05-16 03:23:10.000000000 +0000 @@ -4,10 +4,9 @@ pub struct Rustc { pub path: PathBuf, + pub wrapper: Option, pub verbose_version: String, pub host: String, - /// Backwards compatibility: does this compiler support `--cap-lints` flag? - pub cap_lints: bool, } impl Rustc { @@ -16,17 +15,11 @@ /// /// If successful this function returns a description of the compiler along /// with a list of its capabilities. 
- pub fn new(path: PathBuf) -> CargoResult { + pub fn new(path: PathBuf, wrapper: Option) -> CargoResult { let mut cmd = util::process(&path); cmd.arg("-vV"); - let mut first = cmd.clone(); - first.arg("--cap-lints").arg("allow"); - - let (cap_lints, output) = match first.exec_with_output() { - Ok(output) => (true, output), - Err(..) => (false, cmd.exec_with_output()?), - }; + let output = cmd.exec_with_output()?; let verbose_version = String::from_utf8(output.stdout).map_err(|_| { internal("rustc -v didn't return utf8 output") @@ -44,13 +37,21 @@ Ok(Rustc { path: path, + wrapper: wrapper, verbose_version: verbose_version, host: host, - cap_lints: cap_lints, }) } pub fn process(&self) -> ProcessBuilder { - util::process(&self.path) + if let Some(ref wrapper) = self.wrapper { + let mut cmd = util::process(wrapper); + { + cmd.arg(&self.path); + } + cmd + } else { + util::process(&self.path) + } } } diff -Nru cargo-0.17.0/src/cargo/util/toml.rs cargo-0.19.0/src/cargo/util/toml.rs --- cargo-0.17.0/src/cargo/util/toml.rs 2017-03-03 19:21:26.000000000 +0000 +++ cargo-0.19.0/src/cargo/util/toml.rs 2017-05-16 03:23:10.000000000 +0000 @@ -1,4 +1,4 @@ -use std::collections::{HashMap, HashSet}; +use std::collections::{HashMap, HashSet, BTreeSet}; use std::default::Default; use std::fmt; use std::fs; @@ -7,7 +7,8 @@ use toml; use semver::{self, VersionReq}; -use rustc_serialize::{Decodable, Decoder}; +use serde::de::{self, Deserialize}; +use serde_ignored; use core::{SourceId, Profiles, PackageIdSpec, GitReference, WorkspaceConfig}; use core::{Summary, Manifest, Target, Dependency, DependencyInner, PackageId}; @@ -29,7 +30,6 @@ examples: Vec, tests: Vec, benches: Vec, - } impl Layout { @@ -64,15 +64,6 @@ benches: benches, } } - - fn main(&self) -> Option<&PathBuf> { - self.bins.iter().find(|p| { - match p.file_name().and_then(|s| s.to_str()) { - Some(s) => s == "main.rs", - None => false - } - }) - } } fn try_add_file(files: &mut Vec, file: PathBuf) { @@ -81,25 +72,23 @@ } } 
fn try_add_files(files: &mut Vec, root: PathBuf) { - match fs::read_dir(&root) { - Ok(new) => { - files.extend(new.filter_map(|dir| { - dir.map(|d| d.path()).ok() - }).filter(|f| { - f.extension().and_then(|s| s.to_str()) == Some("rs") - }).filter(|f| { - // Some unix editors may create "dotfiles" next to original - // source files while they're being edited, but these files are - // rarely actually valid Rust source files and sometimes aren't - // even valid UTF-8. Here we just ignore all of them and require - // that they are explicitly specified in Cargo.toml if desired. - f.file_name().and_then(|s| s.to_str()).map(|s| { - !s.starts_with('.') - }).unwrap_or(true) - })) - } - Err(_) => {/* just don't add anything if the directory doesn't exist, etc. */} + if let Ok(new) = fs::read_dir(&root) { + files.extend(new.filter_map(|dir| { + dir.map(|d| d.path()).ok() + }).filter(|f| { + f.extension().and_then(|s| s.to_str()) == Some("rs") + }).filter(|f| { + // Some unix editors may create "dotfiles" next to original + // source files while they're being edited, but these files are + // rarely actually valid Rust source files and sometimes aren't + // even valid UTF-8. Here we just ignore all of them and require + // that they are explicitly specified in Cargo.toml if desired. + f.file_name().and_then(|s| s.to_str()).map(|s| { + !s.starts_with('.') + }).unwrap_or(true) + })) } + /* else just don't add anything if the directory doesn't exist, etc. 
*/ } pub fn to_manifest(contents: &str, @@ -113,15 +102,19 @@ None => manifest.clone(), }; let root = parse(contents, &manifest, config)?; - let mut d = toml::Decoder::new(toml::Value::Table(root)); - let manifest: TomlManifest = Decodable::decode(&mut d).map_err(|e| { - human(e.to_string()) + let mut unused = BTreeSet::new(); + let manifest: TomlManifest = serde_ignored::deserialize(root, |path| { + let mut key = String::new(); + stringify(&mut key, &path); + if !key.starts_with("package.metadata") { + unused.insert(key); + } })?; return match manifest.to_real_manifest(source_id, &layout, config) { Ok((mut manifest, paths)) => { - if let Some(ref toml) = d.toml { - add_unused_keys(&mut manifest, toml, String::new()); + for key in unused { + manifest.add_warning(format!("unused manifest key: {}", key)); } if !manifest.targets().iter().any(|t| !t.is_custom_build()) { bail!("no targets specified in the manifest\n \ @@ -138,41 +131,43 @@ } }; - fn add_unused_keys(m: &mut Manifest, toml: &toml::Value, key: String) { - if key == "package.metadata" { - return - } - match *toml { - toml::Value::Table(ref table) => { - for (k, v) in table.iter() { - add_unused_keys(m, v, if key.is_empty() { - k.clone() - } else { - key.clone() + "." 
+ k - }) - } - } - toml::Value::Array(ref arr) => { - for v in arr.iter() { - add_unused_keys(m, v, key.clone()); + fn stringify(dst: &mut String, path: &serde_ignored::Path) { + use serde_ignored::Path; + + match *path { + Path::Root => {} + Path::Seq { parent, index } => { + stringify(dst, parent); + if dst.len() > 0 { + dst.push_str("."); } + dst.push_str(&index.to_string()); } - _ => m.add_warning(format!("unused manifest key: {}", key)), + Path::Map { parent, ref key } => { + stringify(dst, parent); + if dst.len() > 0 { + dst.push_str("."); + } + dst.push_str(key); + } + Path::Some { parent } | + Path::NewtypeVariant { parent } | + Path::NewtypeStruct { parent } => stringify(dst, parent), } } } pub fn parse(toml: &str, file: &Path, - config: &Config) -> CargoResult { - let mut first_parser = toml::Parser::new(&toml); - if let Some(toml) = first_parser.parse() { - return Ok(toml); - } + config: &Config) -> CargoResult { + let first_error = match toml.parse() { + Ok(ret) => return Ok(ret), + Err(e) => e, + }; - let mut second_parser = toml::Parser::new(toml); + let mut second_parser = toml::de::Deserializer::new(toml); second_parser.set_require_newline_after_table(false); - if let Some(toml) = second_parser.parse() { + if let Ok(ret) = toml::Value::deserialize(&mut second_parser) { let msg = format!("\ TOML file found which contains invalid syntax and will soon not parse at `{}`. @@ -182,25 +177,12 @@ it. 
A newline needs to be added and this warning will soon become a hard error in the future.", file.display()); config.shell().warn(&msg)?; - return Ok(toml) + return Ok(ret) } - let mut error_str = format!("could not parse input as TOML\n"); - for error in first_parser.errors.iter() { - let (loline, locol) = first_parser.to_linecol(error.lo); - let (hiline, hicol) = first_parser.to_linecol(error.hi); - error_str.push_str(&format!("{}:{}:{}{} {}\n", - file.display(), - loline + 1, locol + 1, - if loline != hiline || locol != hicol { - format!("-{}:{}", hiline + 1, - hicol + 1) - } else { - "".to_string() - }, - error.desc)); - } - Err(human(error_str)) + Err(first_error).chain_error(|| { + human("could not parse input as TOML") + }) } type TomlLibTarget = TomlTarget; @@ -209,14 +191,44 @@ type TomlTestTarget = TomlTarget; type TomlBenchTarget = TomlTarget; -#[derive(RustcDecodable)] pub enum TomlDependency { Simple(String), Detailed(DetailedTomlDependency) } +impl de::Deserialize for TomlDependency { + fn deserialize(deserializer: D) -> Result + where D: de::Deserializer + { + struct TomlDependencyVisitor; + + impl de::Visitor for TomlDependencyVisitor { + type Value = TomlDependency; + + fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("a version string like \"0.9.8\" or a \ + detailed dependency like { version = \"0.9.8\" }") + } + + fn visit_str(self, s: &str) -> Result + where E: de::Error + { + Ok(TomlDependency::Simple(s.to_owned())) + } + + fn visit_map(self, map: V) -> Result + where V: de::MapVisitor + { + let mvd = de::value::MapVisitorDeserializer::new(map); + DetailedTomlDependency::deserialize(mvd).map(TomlDependency::Detailed) + } + } -#[derive(RustcDecodable, Clone, Default)] + deserializer.deserialize(TomlDependencyVisitor) + } +} + +#[derive(Deserialize, Clone, Default)] pub struct DetailedTomlDependency { version: Option, path: Option, @@ -226,10 +238,13 @@ rev: Option, features: Option>, optional: Option, + 
#[serde(rename = "default-features")] default_features: Option, + #[serde(rename = "default_features")] + default_features2: Option, } -#[derive(RustcDecodable)] +#[derive(Deserialize)] pub struct TomlManifest { package: Option>, project: Option>, @@ -240,8 +255,14 @@ test: Option>, bench: Option>, dependencies: Option>, + #[serde(rename = "dev-dependencies")] dev_dependencies: Option>, + #[serde(rename = "dev_dependencies")] + dev_dependencies2: Option>, + #[serde(rename = "build-dependencies")] build_dependencies: Option>, + #[serde(rename = "build_dependencies")] + build_dependencies2: Option>, features: Option>>, target: Option>, replace: Option>, @@ -249,7 +270,7 @@ badges: Option>>, } -#[derive(RustcDecodable, Clone, Default)] +#[derive(Deserialize, Clone, Default)] pub struct TomlProfiles { test: Option, doc: Option, @@ -261,50 +282,140 @@ #[derive(Clone)] pub struct TomlOptLevel(String); -impl Decodable for TomlOptLevel { - fn decode(d: &mut D) -> Result { - match d.read_u32() { - Ok(i) => Ok(TomlOptLevel(i.to_string())), - Err(_) => { - match d.read_str() { - Ok(ref s) if s == "s" || s == "z" => - Ok(TomlOptLevel(s.to_string())), - Ok(_) | Err(_) => - Err(d.error("expected an integer, a string \"z\" or a string \"s\"")) +impl de::Deserialize for TomlOptLevel { + fn deserialize(d: D) -> Result + where D: de::Deserializer + { + struct Visitor; + + impl de::Visitor for Visitor { + type Value = TomlOptLevel; + + fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("an optimization level") + } + + fn visit_i64(self, value: i64) -> Result + where E: de::Error + { + Ok(TomlOptLevel(value.to_string())) + } + + fn visit_str(self, value: &str) -> Result + where E: de::Error + { + if value == "s" || value == "z" { + Ok(TomlOptLevel(value.to_string())) + } else { + Err(E::custom(format!("must be an integer, `z`, or `s`, \ + but found: {}", value))) } } } + + d.deserialize_u32(Visitor) } } -#[derive(RustcDecodable, Clone)] 
+#[derive(Clone)] pub enum U32OrBool { U32(u32), Bool(bool), } -#[derive(RustcDecodable, Clone, Default)] +impl de::Deserialize for U32OrBool { + fn deserialize(deserializer: D) -> Result + where D: de::Deserializer + { + struct Visitor; + + impl de::Visitor for Visitor { + type Value = U32OrBool; + + fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("a boolean or an integer") + } + + fn visit_i64(self, u: i64) -> Result + where E: de::Error, + { + Ok(U32OrBool::U32(u as u32)) + } + + fn visit_u64(self, u: u64) -> Result + where E: de::Error, + { + Ok(U32OrBool::U32(u as u32)) + } + + fn visit_bool(self, b: bool) -> Result + where E: de::Error, + { + Ok(U32OrBool::Bool(b)) + } + } + + deserializer.deserialize(Visitor) + } +} + +#[derive(Deserialize, Clone, Default)] pub struct TomlProfile { + #[serde(rename = "opt-level")] opt_level: Option, lto: Option, + #[serde(rename = "codegen-units")] codegen_units: Option, debug: Option, + #[serde(rename = "debug-assertions")] debug_assertions: Option, rpath: Option, panic: Option, + #[serde(rename = "overflow-checks")] + overflow_checks: Option, } -#[derive(RustcDecodable, Clone, Debug)] +#[derive(Clone, Debug)] pub enum StringOrBool { String(String), Bool(bool), } -#[derive(RustcDecodable)] +impl de::Deserialize for StringOrBool { + fn deserialize(deserializer: D) -> Result + where D: de::Deserializer + { + struct Visitor; + + impl de::Visitor for Visitor { + type Value = StringOrBool; + + fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("a boolean or a string") + } + + fn visit_str(self, s: &str) -> Result + where E: de::Error, + { + Ok(StringOrBool::String(s.to_string())) + } + + fn visit_bool(self, b: bool) -> Result + where E: de::Error, + { + Ok(StringOrBool::Bool(b)) + } + } + + deserializer.deserialize(Visitor) + } +} + +#[derive(Deserialize)] pub struct TomlProject { name: String, version: TomlVersion, - authors: Vec, + authors: 
Option>, build: Option, links: Option, exclude: Option>, @@ -320,26 +431,45 @@ keywords: Option>, categories: Option>, license: Option, + #[serde(rename = "license-file")] license_file: Option, repository: Option, } -#[derive(RustcDecodable)] +#[derive(Deserialize)] pub struct TomlWorkspace { members: Option>, + exclude: Option>, } pub struct TomlVersion { version: semver::Version, } -impl Decodable for TomlVersion { - fn decode(d: &mut D) -> Result { - let s = d.read_str()?; - match s.to_semver() { - Ok(s) => Ok(TomlVersion { version: s }), - Err(e) => Err(d.error(&e)), +impl de::Deserialize for TomlVersion { + fn deserialize(d: D) -> Result + where D: de::Deserializer + { + struct Visitor; + + impl de::Visitor for Visitor { + type Value = TomlVersion; + + fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("a semver version") + } + + fn visit_str(self, value: &str) -> Result + where E: de::Error + { + match value.to_semver() { + Ok(s) => Ok(TomlVersion { version: s}), + Err(e) => Err(E::custom(e)), + } + } } + + d.deserialize_str(Visitor) } } @@ -371,7 +501,7 @@ layout.lib.as_ref().map(|lib| { TomlTarget { name: Some(name.to_string()), - path: Some(PathValue::Path(lib.clone())), + path: Some(PathValue(lib.clone())), .. TomlTarget::new() } }) @@ -389,7 +519,7 @@ name.map(|name| { TomlTarget { name: Some(name), - path: Some(PathValue::Path(bin.clone())), + path: Some(PathValue(bin.clone())), .. TomlTarget::new() } }) @@ -401,7 +531,7 @@ ex.file_stem().and_then(|s| s.to_str()).map(|name| { TomlTarget { name: Some(name.to_string()), - path: Some(PathValue::Path(ex.clone())), + path: Some(PathValue(ex.clone())), .. TomlTarget::new() } }) @@ -413,7 +543,7 @@ ex.file_stem().and_then(|s| s.to_str()).map(|name| { TomlTarget { name: Some(name.to_string()), - path: Some(PathValue::Path(ex.clone())), + path: Some(PathValue(ex.clone())), .. 
TomlTarget::new() } }) @@ -425,7 +555,7 @@ ex.file_stem().and_then(|s| s.to_str()).map(|name| { TomlTarget { name: Some(name.to_string()), - path: Some(PathValue::Path(ex.clone())), + path: Some(PathValue(ex.clone())), .. TomlTarget::new() } }) @@ -463,8 +593,8 @@ Some( TomlTarget { name: lib.name.clone().or(Some(project.name.clone())), - path: lib.path.clone().or( - layout.lib.as_ref().map(|p| PathValue::Path(p.clone())) + path: lib.path.clone().or_else( + || layout.lib.as_ref().map(|p| PathValue(p.clone())) ), ..lib.clone() } @@ -475,22 +605,10 @@ let bins = match self.bin { Some(ref bins) => { - let bin = layout.main(); - for target in bins { target.validate_binary_name()?; - } - - bins.iter().map(|t| { - if bin.is_some() && t.path.is_none() { - TomlTarget { - path: bin.as_ref().map(|&p| PathValue::Path(p.clone())), - .. t.clone() - } - } else { - t.clone() - } - }).collect() + }; + bins.clone() } None => inferred_bin_targets(&project.name, layout) }; @@ -555,7 +673,7 @@ } // processing the custom build script - let new_build = self.maybe_custom_build(&project.build, &layout.root, &mut warnings); + let new_build = self.maybe_custom_build(&project.build, &layout.root); // Get targets let targets = normalize(&layout.root, @@ -588,7 +706,7 @@ config: config, warnings: &mut warnings, platform: None, - layout: &layout, + layout: layout, }; fn process_dependencies( @@ -598,7 +716,7 @@ -> CargoResult<()> { let dependencies = match new_deps { - Some(ref dependencies) => dependencies, + Some(dependencies) => dependencies, None => return Ok(()) }; for (n, v) in dependencies.iter() { @@ -612,19 +730,23 @@ // Collect the deps process_dependencies(&mut cx, self.dependencies.as_ref(), None)?; - process_dependencies(&mut cx, self.dev_dependencies.as_ref(), - Some(Kind::Development))?; - process_dependencies(&mut cx, self.build_dependencies.as_ref(), - Some(Kind::Build))?; + let dev_deps = self.dev_dependencies.as_ref() + .or(self.dev_dependencies2.as_ref()); + 
process_dependencies(&mut cx, dev_deps, Some(Kind::Development))?; + let build_deps = self.build_dependencies.as_ref() + .or(self.build_dependencies2.as_ref()); + process_dependencies(&mut cx, build_deps, Some(Kind::Build))?; for (name, platform) in self.target.iter().flat_map(|t| t) { cx.platform = Some(name.parse()?); process_dependencies(&mut cx, platform.dependencies.as_ref(), None)?; - process_dependencies(&mut cx, platform.build_dependencies.as_ref(), - Some(Kind::Build))?; - process_dependencies(&mut cx, platform.dev_dependencies.as_ref(), - Some(Kind::Development))?; + let build_deps = platform.build_dependencies.as_ref() + .or(platform.build_dependencies2.as_ref()); + process_dependencies(&mut cx, build_deps, Some(Kind::Build))?; + let dev_deps = platform.dev_dependencies.as_ref() + .or(platform.dev_dependencies2.as_ref()); + process_dependencies(&mut cx, dev_deps, Some(Kind::Development))?; } replace = self.replace(&mut cx)?; @@ -646,13 +768,14 @@ let exclude = project.exclude.clone().unwrap_or(Vec::new()); let include = project.include.clone().unwrap_or(Vec::new()); - let summary = Summary::new(pkgid, deps, self.features.clone() .unwrap_or(HashMap::new()))?; + let summary = Summary::new(pkgid, deps, self.features.clone() + .unwrap_or_else(HashMap::new))?; let metadata = ManifestMetadata { description: project.description.clone(), homepage: project.homepage.clone(), documentation: project.documentation.clone(), readme: project.readme.clone(), - authors: project.authors.clone(), + authors: project.authors.clone().unwrap_or(Vec::new()), license: project.license.clone(), license_file: project.license_file.clone(), repository: project.repository.clone(), @@ -664,7 +787,10 @@ let workspace_config = match (self.workspace.as_ref(), project.workspace.as_ref()) { (Some(config), None) => { - WorkspaceConfig::Root { members: config.members.clone() } + WorkspaceConfig::Root { + members: config.members.clone(), + exclude: config.exclude.clone().unwrap_or(Vec::new()), 
+ } } (None, root) => { WorkspaceConfig::Member { root: root.cloned() } @@ -740,7 +866,10 @@ let profiles = build_profiles(&self.profile); let workspace_config = match self.workspace { Some(ref config) => { - WorkspaceConfig::Root { members: config.members.clone() } + WorkspaceConfig::Root { + members: config.members.clone(), + exclude: config.exclude.clone().unwrap_or(Vec::new()), + } } None => { bail!("virtual manifests must be configured with [workspace]"); @@ -789,8 +918,7 @@ fn maybe_custom_build(&self, build: &Option, - project_dir: &Path, - warnings: &mut Vec) + project_dir: &Path) -> Option { let build_rs = project_dir.join("build.rs"); match *build { @@ -799,17 +927,10 @@ Some(StringOrBool::String(ref s)) => Some(PathBuf::from(s)), None => { match fs::metadata(&build_rs) { - // Enable this after the warning has been visible for some time - // Ok(ref e) if e.is_file() => Some(build_rs.into()), - Ok(ref e) if e.is_file() => { - warnings.push("`build.rs` files in the same directory \ - as your `Cargo.toml` will soon be treated \ - as build scripts. 
Add `build = false` to \ - your `Cargo.toml` to prevent this".into()); - None - }, - Ok(_) => None, - Err(_) => None, + // If there is a build.rs file next to the Cargo.toml, assume it is + // a build script + Ok(ref e) if e.is_file() => Some(build_rs.into()), + Ok(_) | Err(_) => None, } } } @@ -938,7 +1059,9 @@ None => DependencyInner::parse(name, version, &new_source_id, None)?, }; dep = dep.set_features(details.features.unwrap_or(Vec::new())) - .set_default_features(details.default_features.unwrap_or(true)) + .set_default_features(details.default_features + .or(details.default_features2) + .unwrap_or(true)) .set_optional(details.optional.unwrap_or(false)) .set_platform(cx.platform.clone()); if let Some(kind) = kind { @@ -948,48 +1071,60 @@ } } -#[derive(RustcDecodable, Debug, Clone)] +#[derive(Default, Deserialize, Debug, Clone)] struct TomlTarget { name: Option, + + // The intention was to only accept `crate-type` here but historical + // versions of Cargo also accepted `crate_type`, so look for both. + #[serde(rename = "crate-type")] crate_type: Option>, + #[serde(rename = "crate_type")] + crate_type2: Option>, + path: Option, test: Option, doctest: Option, bench: Option, doc: Option, plugin: Option, + #[serde(rename = "proc-macro")] proc_macro: Option, + #[serde(rename = "proc_macro")] + proc_macro2: Option, harness: Option, + #[serde(rename = "required-features")] + required_features: Option>, } -#[derive(RustcDecodable, Clone)] -enum PathValue { - String(String), - Path(PathBuf), +#[derive(Clone)] +struct PathValue(PathBuf); + +impl de::Deserialize for PathValue { + fn deserialize(deserializer: D) -> Result + where D: de::Deserializer + { + Ok(PathValue(String::deserialize(deserializer)?.into())) + } } /// Corresponds to a `target` entry, but `TomlTarget` is already used. 
-#[derive(RustcDecodable)] +#[derive(Deserialize)] struct TomlPlatform { dependencies: Option>, + #[serde(rename = "build-dependencies")] build_dependencies: Option>, + #[serde(rename = "build_dependencies")] + build_dependencies2: Option>, + #[serde(rename = "dev-dependencies")] dev_dependencies: Option>, + #[serde(rename = "dev_dependencies")] + dev_dependencies2: Option>, } impl TomlTarget { fn new() -> TomlTarget { - TomlTarget { - name: None, - crate_type: None, - path: None, - test: None, - doctest: None, - bench: None, - doc: None, - plugin: None, - proc_macro: None, - harness: None, - } + TomlTarget::default() } fn name(&self) -> String { @@ -1077,29 +1212,21 @@ // // A plugin requires exporting plugin_registrar so a crate cannot be // both at once. - if self.plugin == Some(true) && self.proc_macro == Some(true) { + if self.plugin == Some(true) && self.proc_macro() == Some(true) { Err(human("lib.plugin and lib.proc-macro cannot both be true".to_string())) } else { Ok(()) } } -} -impl PathValue { - fn to_path(&self) -> PathBuf { - match *self { - PathValue::String(ref s) => PathBuf::from(s), - PathValue::Path(ref p) => p.clone(), - } + fn proc_macro(&self) -> Option { + self.proc_macro.or(self.proc_macro2) } } impl fmt::Debug for PathValue { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match *self { - PathValue::String(ref s) => s.fmt(f), - PathValue::Path(ref p) => p.display().fmt(f), - } + self.0.fmt(f) } } @@ -1117,7 +1244,7 @@ .set_doctest(toml.doctest.unwrap_or(t2.doctested())) .set_benched(toml.bench.unwrap_or(t2.benched())) .set_harness(toml.harness.unwrap_or(t2.harness())) - .set_for_host(match (toml.plugin, toml.proc_macro) { + .set_for_host(match (toml.plugin, toml.proc_macro()) { (None, None) => t2.for_host(), (Some(true), _) | (_, Some(true)) => true, (Some(false), _) | (_, Some(false)) => false, @@ -1125,20 +1252,21 @@ } let lib_target = |dst: &mut Vec, l: &TomlLibTarget| { - let path = l.path.clone().unwrap_or( - 
PathValue::Path(Path::new("src").join(&format!("{}.rs", l.name()))) + let path = l.path.clone().unwrap_or_else( + || PathValue(Path::new("src").join(&format!("{}.rs", l.name()))) ); - let crate_types = match l.crate_type.clone() { + let crate_types = l.crate_type.as_ref().or(l.crate_type2.as_ref()); + let crate_types = match crate_types { Some(kinds) => kinds.iter().map(|s| LibKind::from_str(s)).collect(), None => { vec![ if l.plugin == Some(true) {LibKind::Dylib} - else if l.proc_macro == Some(true) {LibKind::ProcMacro} + else if l.proc_macro() == Some(true) {LibKind::ProcMacro} else {LibKind::Lib} ] } }; let mut target = Target::lib_target(&l.name(), crate_types, - package_root.join(path.to_path())); + package_root.join(&path.0)); configure(l, &mut target); dst.push(target); }; @@ -1147,9 +1275,10 @@ default: &mut FnMut(&TomlBinTarget) -> PathBuf| { for bin in bins.iter() { let path = bin.path.clone().unwrap_or_else(|| { - PathValue::Path(default(bin)) + PathValue(default(bin)) }); - let mut target = Target::bin_target(&bin.name(), package_root.join(path.to_path())); + let mut target = Target::bin_target(&bin.name(), package_root.join(&path.0), + bin.required_features.clone()); configure(bin, &mut target); dst.push(target); } @@ -1167,18 +1296,20 @@ default: &mut FnMut(&TomlExampleTarget) -> PathBuf| { for ex in examples.iter() { let path = ex.path.clone().unwrap_or_else(|| { - PathValue::Path(default(ex)) + PathValue(default(ex)) }); - let crate_types = match ex.crate_type { - Some(ref kinds) => kinds.iter().map(|s| LibKind::from_str(s)).collect(), + let crate_types = ex.crate_type.as_ref().or(ex.crate_type2.as_ref()); + let crate_types = match crate_types { + Some(kinds) => kinds.iter().map(|s| LibKind::from_str(s)).collect(), None => Vec::new() }; let mut target = Target::example_target( &ex.name(), crate_types, - package_root.join(path.to_path()) + package_root.join(&path.0), + ex.required_features.clone() ); configure(ex, &mut target); dst.push(target); @@ 
-1190,10 +1321,11 @@ default: &mut FnMut(&TomlTestTarget) -> PathBuf| { for test in tests.iter() { let path = test.path.clone().unwrap_or_else(|| { - PathValue::Path(default(test)) + PathValue(default(test)) }); - let mut target = Target::test_target(&test.name(), package_root.join(path.to_path())); + let mut target = Target::test_target(&test.name(), package_root.join(&path.0), + test.required_features.clone()); configure(test, &mut target); dst.push(target); } @@ -1204,10 +1336,11 @@ default: &mut FnMut(&TomlBenchTarget) -> PathBuf| { for bench in benches.iter() { let path = bench.path.clone().unwrap_or_else(|| { - PathValue::Path(default(bench)) + PathValue(default(bench)) }); - let mut target = Target::bench_target(&bench.name(), package_root.join(path.to_path())); + let mut target = Target::bench_target(&bench.name(), package_root.join(&path.0), + bench.required_features.clone()); configure(bench, &mut target); dst.push(target); } @@ -1218,14 +1351,13 @@ if let Some(ref lib) = *lib { lib_target(&mut ret, lib); bin_targets(&mut ret, bins, - &mut |bin| Path::new("src").join("bin") - .join(&format!("{}.rs", bin.name()))); + &mut |bin| inferred_bin_path(bin, package_root, true, bins.len())); } else if bins.len() > 0 { bin_targets(&mut ret, bins, - &mut |bin| Path::new("src") - .join(&format!("{}.rs", bin.name()))); + &mut |bin| inferred_bin_path(bin, package_root, false, bins.len())); } + if let Some(custom_build) = custom_build { custom_build_target(&mut ret, &custom_build); } @@ -1253,6 +1385,73 @@ ret } +fn inferred_bin_path(bin: &TomlBinTarget, + package_root: &Path, + lib: bool, + bin_len: usize) -> PathBuf { + // we have a lib with multiple bins, so the bins are expected to be located + // inside src/bin + if lib && bin_len > 1 { + return Path::new("src").join("bin").join(&format!("{}.rs", bin.name())) + .to_path_buf() + } + + // we have a lib with one bin, so it's either src/main.rs, src/bin/foo.rs or + // src/bin/main.rs + if lib && bin_len == 1 { + let 
path = Path::new("src").join(&format!("main.rs")); + if package_root.join(&path).exists() { + return path.to_path_buf() + } + + let path = Path::new("src").join("bin").join(&format!("{}.rs", bin.name())); + if package_root.join(&path).exists() { + return path.to_path_buf() + } + + return Path::new("src").join("bin").join(&format!("main.rs")).to_path_buf() + } + + // here we have a single bin, so it may be located in src/main.rs, src/foo.rs, + // srb/bin/foo.rs or src/bin/main.rs + if bin_len == 1 { + let path = Path::new("src").join(&format!("main.rs")); + if package_root.join(&path).exists() { + return path.to_path_buf() + } + + let path = Path::new("src").join(&format!("{}.rs", bin.name())); + if package_root.join(&path).exists() { + return path.to_path_buf() + } + + let path = Path::new("src").join("bin").join(&format!("{}.rs", bin.name())); + if package_root.join(&path).exists() { + return path.to_path_buf() + } + + return Path::new("src").join("bin").join(&format!("main.rs")).to_path_buf() + } + + // bin_len > 1 + let path = Path::new("src").join("bin").join(&format!("{}.rs", bin.name())); + if package_root.join(&path).exists() { + return path.to_path_buf() + } + + let path = Path::new("src").join(&format!("{}.rs", bin.name())); + if package_root.join(&path).exists() { + return path.to_path_buf() + } + + let path = Path::new("src").join("bin").join(&format!("main.rs")); + if package_root.join(&path).exists() { + return path.to_path_buf() + } + + return Path::new("src").join(&format!("main.rs")).to_path_buf() +} + fn build_profiles(profiles: &Option) -> Profiles { let profiles = profiles.as_ref(); let mut profiles = Profiles { @@ -1286,7 +1485,7 @@ fn merge(profile: Profile, toml: Option<&TomlProfile>) -> Profile { let &TomlProfile { ref opt_level, lto, codegen_units, ref debug, debug_assertions, rpath, - ref panic + ref panic, ref overflow_checks, } = match toml { Some(toml) => toml, None => return profile, @@ -1305,6 +1504,7 @@ rustdoc_args: None, debuginfo: 
debug.unwrap_or(profile.debuginfo), debug_assertions: debug_assertions.unwrap_or(profile.debug_assertions), + overflow_checks: overflow_checks.unwrap_or(profile.overflow_checks), rpath: rpath.unwrap_or(profile.rpath), test: profile.test, doc: profile.doc, diff -Nru cargo-0.17.0/src/cargo/util/vcs.rs cargo-0.19.0/src/cargo/util/vcs.rs --- cargo-0.17.0/src/cargo/util/vcs.rs 2017-03-03 19:21:26.000000000 +0000 +++ cargo-0.19.0/src/cargo/util/vcs.rs 2017-05-16 03:23:10.000000000 +0000 @@ -6,6 +6,7 @@ pub struct HgRepo; pub struct GitRepo; +pub struct PijulRepo; impl GitRepo { pub fn init(path: &Path, _: &Path) -> CargoResult { @@ -28,3 +29,9 @@ } } +impl PijulRepo { + pub fn init(path: &Path, cwd: &Path) -> CargoResult { + process("pijul").cwd(cwd).arg("init").arg(path).exec()?; + Ok(PijulRepo) + } +} diff -Nru cargo-0.17.0/src/ci/docker/android/Dockerfile cargo-0.19.0/src/ci/docker/android/Dockerfile --- cargo-0.17.0/src/ci/docker/android/Dockerfile 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/src/ci/docker/android/Dockerfile 2017-05-16 03:23:10.000000000 +0000 @@ -0,0 +1,27 @@ +FROM ubuntu:16.04 + +RUN apt-get update && \ + apt-get install -y --no-install-recommends \ + ca-certificates \ + cmake \ + curl \ + gcc \ + git \ + libc6-dev \ + make \ + pkg-config + +WORKDIR /android +RUN apt-get install -y --no-install-recommends \ + unzip \ + python && \ + curl -O https://dl.google.com/android/repository/android-ndk-r13b-linux-x86_64.zip && \ + unzip -q android-ndk-r13b-linux-x86_64.zip && \ + ./android-ndk-r13b/build/tools/make_standalone_toolchain.py \ + --install-dir /android-ndk/arm --arch arm --api 21 && \ + rm -rf ./android-ndk-r13b-linux-x86_64.zip ./android-ndk-r13b && \ + apt-get purge --auto-remove -y unzip python + +ENV PATH=$PATH:/android-ndk/arm/bin + +ENV CARGO_TARGET_ARMV7_LINUX_ANDROIDEABI_LINKER=arm-linux-androideabi-gcc diff -Nru cargo-0.17.0/src/ci/docker/run.sh cargo-0.19.0/src/ci/docker/run.sh --- cargo-0.17.0/src/ci/docker/run.sh 2017-03-03 
19:21:26.000000000 +0000 +++ cargo-0.19.0/src/ci/docker/run.sh 2017-05-16 03:23:10.000000000 +0000 @@ -43,8 +43,6 @@ --volume "$HOME/.cargo:/cargo" \ --volume `rustc --print sysroot`:/rust:ro \ --volume `pwd`/target:/tmp/target \ - --interactive \ - --tty \ rust-ci \ sh -c "\ PATH=\$PATH:/rust/bin \ diff -Nru cargo-0.17.0/src/ci/run.sh cargo-0.19.0/src/ci/run.sh --- cargo-0.17.0/src/ci/run.sh 2017-03-03 19:21:26.000000000 +0000 +++ cargo-0.19.0/src/ci/run.sh 2017-05-16 03:23:10.000000000 +0000 @@ -47,6 +47,12 @@ exit 0 fi +# For some unknown reason libz is not found in the android docker image, so we +# use this workaround +if [ "$TARGET" = armv7-linux-androideabi ]; then + export DEP_Z_ROOT=/android-ndk/arm/sysroot/usr +fi + $SRC/configure \ --prefix=/tmp/obj/install \ --target=$TARGET \ diff -Nru cargo-0.17.0/src/crates-io/Cargo.toml cargo-0.19.0/src/crates-io/Cargo.toml --- cargo-0.17.0/src/crates-io/Cargo.toml 2017-03-03 19:21:26.000000000 +0000 +++ cargo-0.19.0/src/crates-io/Cargo.toml 2017-05-16 03:23:10.000000000 +0000 @@ -1,6 +1,6 @@ [package] name = "crates-io" -version = "0.6.0" +version = "0.8.0" authors = ["Alex Crichton "] license = "MIT/Apache-2.0" repository = "https://github.com/rust-lang/cargo" @@ -14,5 +14,7 @@ [dependencies] curl = "0.4" +serde = "0.9" +serde_derive = "0.9" +serde_json = "0.9" url = "1.0" -rustc-serialize = "0.3" diff -Nru cargo-0.17.0/src/crates-io/lib.rs cargo-0.19.0/src/crates-io/lib.rs --- cargo-0.17.0/src/crates-io/lib.rs 2017-03-03 19:21:26.000000000 +0000 +++ cargo-0.19.0/src/crates-io/lib.rs 2017-05-16 03:23:10.000000000 +0000 @@ -1,6 +1,8 @@ extern crate curl; extern crate url; -extern crate rustc_serialize; +extern crate serde_json; +#[macro_use] +extern crate serde_derive; use std::collections::HashMap; use std::fmt; @@ -10,7 +12,6 @@ use std::result; use curl::easy::{Easy, List}; -use rustc_serialize::json::{self, Json}; use url::percent_encoding::{percent_encode, QUERY_ENCODE_SET}; @@ -37,26 +38,12 @@ TokenMissing, 
Io(io::Error), NotFound, - JsonEncodeError(json::EncoderError), - JsonDecodeError(json::DecoderError), - JsonParseError(json::ParserError), + Json(serde_json::Error), } -impl From for Error { - fn from(err: json::EncoderError) -> Error { - Error::JsonEncodeError(err) - } -} - -impl From for Error { - fn from(err: json::DecoderError) -> Error { - Error::JsonDecodeError(err) - } -} - -impl From for Error { - fn from(err: json::ParserError) -> Error { - Error::JsonParseError(err) +impl From for Error { + fn from(err: serde_json::Error) -> Error { + Error::Json(err) } } @@ -66,14 +53,14 @@ } } -#[derive(RustcDecodable)] +#[derive(Deserialize)] pub struct Crate { pub name: String, pub description: Option, pub max_version: String } -#[derive(RustcEncodable)] +#[derive(Serialize)] pub struct NewCrate { pub name: String, pub vers: String, @@ -92,7 +79,7 @@ pub badges: HashMap>, } -#[derive(RustcEncodable)] +#[derive(Serialize)] pub struct NewCrateDependency { pub optional: bool, pub default_features: bool, @@ -103,7 +90,7 @@ pub kind: String, } -#[derive(RustcDecodable)] +#[derive(Deserialize)] pub struct User { pub id: u32, pub login: String, @@ -117,13 +104,13 @@ pub invalid_badges: Vec, } -#[derive(RustcDecodable)] struct R { ok: bool } -#[derive(RustcDecodable)] struct ApiErrorList { errors: Vec } -#[derive(RustcDecodable)] struct ApiError { detail: String } -#[derive(RustcEncodable)] struct OwnersReq<'a> { users: &'a [&'a str] } -#[derive(RustcDecodable)] struct Users { users: Vec } -#[derive(RustcDecodable)] struct TotalCrates { total: u32 } -#[derive(RustcDecodable)] struct Crates { crates: Vec, meta: TotalCrates } +#[derive(Deserialize)] struct R { ok: bool } +#[derive(Deserialize)] struct ApiErrorList { errors: Vec } +#[derive(Deserialize)] struct ApiError { detail: String } +#[derive(Serialize)] struct OwnersReq<'a> { users: &'a [&'a str] } +#[derive(Deserialize)] struct Users { users: Vec } +#[derive(Deserialize)] struct TotalCrates { total: u32 } 
+#[derive(Deserialize)] struct Crates { crates: Vec, meta: TotalCrates } impl Registry { pub fn new(host: String, token: Option) -> Registry { Registry::new_handle(host, token, Easy::new()) @@ -140,29 +127,29 @@ } pub fn add_owners(&mut self, krate: &str, owners: &[&str]) -> Result<()> { - let body = json::encode(&OwnersReq { users: owners })?; + let body = serde_json::to_string(&OwnersReq { users: owners })?; let body = self.put(format!("/crates/{}/owners", krate), body.as_bytes())?; - assert!(json::decode::(&body)?.ok); + assert!(serde_json::from_str::(&body)?.ok); Ok(()) } pub fn remove_owners(&mut self, krate: &str, owners: &[&str]) -> Result<()> { - let body = json::encode(&OwnersReq { users: owners })?; + let body = serde_json::to_string(&OwnersReq { users: owners })?; let body = self.delete(format!("/crates/{}/owners", krate), Some(body.as_bytes()))?; - assert!(json::decode::(&body)?.ok); + assert!(serde_json::from_str::(&body)?.ok); Ok(()) } pub fn list_owners(&mut self, krate: &str) -> Result> { let body = self.get(format!("/crates/{}/owners", krate))?; - Ok(json::decode::(&body)?.users) + Ok(serde_json::from_str::(&body)?.users) } pub fn publish(&mut self, krate: &NewCrate, tarball: &File) -> Result { - let json = json::encode(krate)?; + let json = serde_json::to_string(krate)?; // Prepare the body. The format of the upload request is: // // @@ -208,28 +195,27 @@ body.read(buf).unwrap_or(0) })?; - // Can't derive RustcDecodable because JSON has a key named "crate" :( let response = if body.len() > 0 { - Json::from_str(&body)? + body.parse::()? } else { - Json::from_str("{}")? + "{}".parse()? 
}; let invalid_categories: Vec = - response - .find_path(&["warnings", "invalid_categories"]) - .and_then(Json::as_array) + response.get("warnings") + .and_then(|j| j.get("invalid_categories")) + .and_then(|j| j.as_array()) .map(|x| { - x.iter().flat_map(Json::as_string).map(Into::into).collect() + x.iter().flat_map(|j| j.as_str()).map(Into::into).collect() }) .unwrap_or_else(Vec::new); let invalid_badges: Vec = - response - .find_path(&["warnings", "invalid_badges"]) - .and_then(Json::as_array) + response.get("warnings") + .and_then(|j| j.get("invalid_badges")) + .and_then(|j| j.as_array()) .map(|x| { - x.iter().flat_map(Json::as_string).map(Into::into).collect() + x.iter().flat_map(|j| j.as_str()).map(Into::into).collect() }) .unwrap_or_else(Vec::new); @@ -246,21 +232,21 @@ None, Auth::Unauthorized )?; - let crates = json::decode::(&body)?; + let crates = serde_json::from_str::(&body)?; Ok((crates.crates, crates.meta.total)) } pub fn yank(&mut self, krate: &str, version: &str) -> Result<()> { let body = self.delete(format!("/crates/{}/{}/yank", krate, version), None)?; - assert!(json::decode::(&body)?.ok); + assert!(serde_json::from_str::(&body)?.ok); Ok(()) } pub fn unyank(&mut self, krate: &str, version: &str) -> Result<()> { let body = self.put(format!("/crates/{}/{}/unyank", krate, version), &[])?; - assert!(json::decode::(&body)?.ok); + assert!(serde_json::from_str::(&body)?.ok); Ok(()) } @@ -337,7 +323,7 @@ Ok(body) => body, Err(..) 
=> return Err(Error::NonUtf8Body), }; - match json::decode::(&body) { + match serde_json::from_str::(&body) { Ok(errors) => { return Err(Error::Api(errors.errors.into_iter().map(|s| s.detail) .collect())) @@ -369,9 +355,7 @@ Error::TokenMissing => write!(f, "no upload token found, please run `cargo login`"), Error::Io(ref e) => write!(f, "io error: {}", e), Error::NotFound => write!(f, "cannot find crate"), - Error::JsonEncodeError(ref e) => write!(f, "json encode error: {}", e), - Error::JsonDecodeError(ref e) => write!(f, "json decode error: {}", e), - Error::JsonParseError(ref e) => write!(f, "json parse error: {}", e), + Error::Json(ref e) => write!(f, "json error: {}", e), } } } diff -Nru cargo-0.17.0/src/doc/config.md cargo-0.19.0/src/doc/config.md --- cargo-0.17.0/src/doc/config.md 2017-03-03 19:21:26.000000000 +0000 +++ cargo-0.19.0/src/doc/config.md 2017-05-16 03:23:10.000000000 +0000 @@ -58,7 +58,8 @@ # For the following sections, $triple refers to any valid target triple, not the # literal string "$triple", and it will apply whenever that target triple is -# being compiled to. +# being compiled to. 'cfg(...)' refers to the Rust-like `#[cfg]` syntax for +# conditional compilation. [target] # For Cargo builds which do not mention --target, this is the linker # which is passed to rustc (via `-C linker=`). By default this flag is not @@ -73,6 +74,13 @@ # this value overrides build.rustflags when both are present rustflags = ["..", ".."] +[target.'cfg(...)'] +# Similar for the $triple configuration, but using the `cfg` syntax. +# If several `cfg` and $triple targets are candidates, then the rustflags +# are concatenated. The `cfg` syntax only applies to rustflags, and not to +# linker. +rustflags = ["..", ".."] + # Configuration keys related to the registry [registry] index = "..." # URL of the registry index (defaults to the central repository) @@ -83,6 +91,7 @@ # in libcurl format, e.g. 
"socks5h://host:port" timeout = 60000 # Timeout for each HTTP request, in milliseconds cainfo = "cert.pem" # Path to Certificate Authority (CA) bundle (optional) +check-revoke = true # Indicates whether SSL certs are checked for revocation [build] jobs = 1 # number of parallel jobs, defaults to # of CPUs diff -Nru cargo-0.17.0/src/doc/environment-variables.md cargo-0.19.0/src/doc/environment-variables.md --- cargo-0.17.0/src/doc/environment-variables.md 2017-03-03 19:21:26.000000000 +0000 +++ cargo-0.19.0/src/doc/environment-variables.md 2017-05-16 03:23:10.000000000 +0000 @@ -17,6 +17,9 @@ relative to the current working directory. * `RUSTC` - Instead of running `rustc`, Cargo will execute this specified compiler instead. +* `RUSTC_WRAPPER` - Instead of simply running `rustc`, Cargo will execute this + specified wrapper instead, passing as its commandline arguments the rustc + invocation, with the first argument being rustc. * `RUSTDOC` - Instead of running `rustdoc`, Cargo will execute this specified `rustdoc` instance instead. * `RUSTFLAGS` - A space-separated list of custom flags to pass to all compiler @@ -30,8 +33,9 @@ # Environment variables Cargo sets for crates -Cargo exposes these environment variables to your crate when it is compiled. To get the -value of any of these variables in a Rust program, do this: +Cargo exposes these environment variables to your crate when it is compiled. +Note that this applies for test binaries as well. +To get the value of any of these variables in a Rust program, do this: ``` let version = env!("CARGO_PKG_VERSION"); @@ -39,16 +43,19 @@ `version` will now contain the value of `CARGO_PKG_VERSION`. +* `CARGO` - Path to the `cargo` binary performing the build. * `CARGO_MANIFEST_DIR` - The directory containing the manifest of your package. * `CARGO_PKG_VERSION` - The full version of your package. * `CARGO_PKG_VERSION_MAJOR` - The major version of your package. * `CARGO_PKG_VERSION_MINOR` - The minor version of your package. 
* `CARGO_PKG_VERSION_PATCH` - The patch version of your package. * `CARGO_PKG_VERSION_PRE` - The pre-release version of your package. -* `CARGO_PKG_AUTHORS` - Colon seperated list of authors from the manifest of your package. +* `CARGO_PKG_AUTHORS` - Colon separated list of authors from the manifest of your package. * `CARGO_PKG_NAME` - The name of your package. * `CARGO_PKG_DESCRIPTION` - The description of your package. * `CARGO_PKG_HOMEPAGE` - The home page of your package. +* `OUT_DIR` - If the package has a build script, this is set to the folder where the build + script should place its output. See below for more information. # Environment variables Cargo sets for build scripts @@ -84,8 +91,7 @@ be useful to pass a `-j` parameter to a system like `make`. * `OPT_LEVEL`, `DEBUG` - values of the corresponding variables for the profile currently being built. -* `PROFILE` - name of the profile currently being built (see - [profiles][profile]). +* `PROFILE` - `release` for release builds, `debug` for other builds. * `DEP__` - For more information about this set of environment variables, see build script documentation about [`links`][links]. * `RUSTC`, `RUSTDOC` - the compiler and documentation generator that Cargo has @@ -95,3 +101,10 @@ [links]: build-script.html#the-links-manifest-key [profile]: manifest.html#the-profile-sections [clang]:http://clang.llvm.org/docs/CrossCompilation.html#target-triple + +# Environment variables Cargo sets for 3rd party subcommands + +Cargo exposes this environment variable to 3rd party subcommands +(ie. programs named `cargo-foobar` placed in `$PATH`): + +* `CARGO` - Path to the `cargo` binary performing the build. 
diff -Nru cargo-0.17.0/src/doc/external-tools.md cargo-0.19.0/src/doc/external-tools.md --- cargo-0.17.0/src/doc/external-tools.md 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/src/doc/external-tools.md 2017-05-16 03:23:10.000000000 +0000 @@ -0,0 +1,103 @@ +% External tools + +One of the goals of Cargo is simple integration with third-party tools, like +IDEs and other build systems. To make integration easier, Cargo has several +facilities: + +* `cargo metadata` command, which outputs project structure and dependencies + information in JSON, + +* `--message-format` flag, which outputs information about a particular build, + +* support for custom subcommands. + + +# Information about project structure + + +You can use `cargo metadata` command to get information about project structure +and dependencies. The output of the command looks like this: + +```text +{ + // Integer version number of the format. + "version": integer, + + // List of packages for this workspace, including dependencies. + "packages": [ + { + // Opaque package identifier. + "id": PackageId, + + "name": string, + + "version": string, + + "source": SourceId, + + // A list of declared dependencies, see `resolve` field for actual dependencies. + "dependencies": [ Dependency ], + + "targets: [ Target ], + + // Path to Cargo.toml + "manifest_path": string, + } + ], + + "workspace_members": [ PackageId ], + + // Dependencies graph. + "resolve": { + "nodes": [ + { + "id": PackageId, + "dependencies": [ PackageId ] + } + ] + } +} +``` + +The format is stable and versioned. When calling `cargo metadata`, you should +pass `--format-version` flag explicitly to avoid forward incompatibility +hazard. + +If you are using Rust, there is [cargo_metadata] crate. 
+ +[cargo_metadata]: https://crates.io/crates/cargo_metadata + + +# Information about build + +When passing `--message=format=json`, Cargo will output the following +information during the build: + +* compiler errors and warnings, + +* produced artifacts, + +* results of the build scripts (for example, native dependencies). + +The output goes to stdout in the JSON object per line format. The `reason` field +distinguishes different kinds of messages. + +Information about dependencies in the Makefile-compatible format is stored in +the `.d` files alongside the artifacts. + + +# Custom subcommands. + +Cargo is designed to be extensible with new subcommands without having to modify +Cargo itself. This is achieved by translating a cargo invocation of the form +cargo `(?[^ ]+)` into an invocation of an external tool +`cargo-${command}` that then needs to be present in one of the user's `$PATH` +directories. + +Custom subcommand may use `CARGO` environment variable to call back to +Cargo. Alternatively, it can link to `cargo` crate as a library, but this +approach has drawbacks: + +* Cargo as a library is unstable, API changes without deprecation, + +* versions of Cargo library and Cargo binary may be different. diff -Nru cargo-0.17.0/src/doc/header.html cargo-0.19.0/src/doc/header.html --- cargo-0.17.0/src/doc/header.html 2017-03-03 19:21:26.000000000 +0000 +++ cargo-0.19.0/src/doc/header.html 2017-05-16 03:23:10.000000000 +0000 @@ -27,7 +27,9 @@ Docs - + diff -Nru cargo-0.17.0/src/doc/machine-readable-output.md cargo-0.19.0/src/doc/machine-readable-output.md --- cargo-0.17.0/src/doc/machine-readable-output.md 2017-03-03 19:21:26.000000000 +0000 +++ cargo-0.19.0/src/doc/machine-readable-output.md 1970-01-01 00:00:00.000000000 +0000 @@ -1,78 +0,0 @@ -% Machine readable output. - -Cargo can output information about project and build in JSON format. 
- -# Information about project structure - -You can use `cargo metadata` command to get information about project structure -and dependencies. The output of the command looks like this: - -```text -{ - // Integer version number of the format. - "version": integer, - - // List of packages for this workspace, including dependencies. - "packages": [ - { - // Opaque package identifier. - "id": PackageId, - - "name": string, - - "version": string, - - "source": SourceId, - - // A list of declared dependencies, see `resolve` field for actual dependencies. - "dependencies": [ Dependency ], - - "targets: [ Target ], - - // Path to Cargo.toml - "manifest_path": string, - } - ], - - "workspace_members": [ PackageId ], - - // Dependencies graph. - "resolve": { - "nodes": [ - { - "id": PackageId, - "dependencies": [ PackageId ] - } - ] - } -} -``` - - -# Compiler errors - -If you supply `--message-format json` to commands like `cargo build`, Cargo -reports compilation errors and warnings in JSON format. Messages go to the -standard output. Each message occupies exactly one line and does not contain -internal `\n` symbols, so it is possible to process messages one by one -without waiting for the whole build to finish. - -The message format looks like this: - -```text -{ - // Type of the message. - "reason": "compiler-message", - - // Unique opaque identifier of compiled package. - "package_id": PackageId, - - // Unique specification of a particular target within the package. - "target": Target, - - // The error message from the compiler in JSON format. - "message": {...} -} -``` - -Package and target specification are the same that `cargo metadata` uses. diff -Nru cargo-0.17.0/src/doc/manifest.md cargo-0.19.0/src/doc/manifest.md --- cargo-0.17.0/src/doc/manifest.md 2017-03-03 19:21:26.000000000 +0000 +++ cargo-0.19.0/src/doc/manifest.md 2017-05-16 03:23:10.000000000 +0000 @@ -142,8 +142,8 @@ license-file = "..." # Optional specification of badges to be displayed on crates.io. 
The badges -# currently available are Travis CI and Appveyor latest build status, specified -# using the following parameters: +# currently available are Travis CI, Appveyor, and GitLab latest build status, +# specified using the following parameters: [badges] # Travis CI: `repository` is required. `branch` is optional; default is `master` travis-ci = { repository = "...", branch = "master" } @@ -151,6 +151,8 @@ # `service` is optional; valid values are `github` (default), `bitbucket`, and # `gitlab`. appveyor = { repository = "...", branch = "master", service = "github" } +# GitLab: `repository` is required. `branch` is optional; default is `master` +gitlab = { repository = "...", branch = "master" } ``` The [crates.io](https://crates.io) registry will render the description, display @@ -386,6 +388,9 @@ # Optional key, inferred if not present members = ["path/to/member1", "path/to/member2"] + +# Optional key, empty if not present +exclude = ["path1", "path/to/dir2"] ``` Workspaces were added to Cargo as part [RFC 1525] and have a number of @@ -408,7 +413,9 @@ dependencies residing in the workspace directory become members. You can add additional packages to the workspace by listing them in the `members` key. Note that members of the workspaces listed explicitly will also have their path -dependencies included in the workspace. +dependencies included in the workspace. Finally, the `exclude` key can be used +to blacklist paths from being included in a workspace. This can be useful if +some path dependencies aren't desired to be in the workspace at all. The `package.workspace` manifest key (described above) is used in member crates to point at a workspace's root crate. If this key is omitted then it is inferred @@ -549,6 +556,25 @@ harness = true ``` +## The `required-features` field (optional) + +The `required-features` field specifies which features the target needs in order +to be built. 
If any of the required features are not selected, the target will +be skipped. This is only relevant for the `[[bin]]`, `[[bench]]`, `[[test]]`, +and `[[example]]` sections, it has no effect on `[lib]`. + +```toml +[features] +# ... +postgres = [] +sqlite = [] +tools = [] + +[[bin]] +# ... +required-features = ["postgres", "tools"] +``` + # Building dynamic or static libraries If your project produces a library, you can specify which kind of library to diff -Nru cargo-0.17.0/src/doc/policies.md cargo-0.19.0/src/doc/policies.md --- cargo-0.17.0/src/doc/policies.md 2017-03-03 19:21:26.000000000 +0000 +++ cargo-0.19.0/src/doc/policies.md 2017-05-16 03:23:10.000000000 +0000 @@ -3,7 +3,7 @@ In general, these policies are guidelines. Problems are often contextual, and exceptional circumstances sometimes require exceptional measures. We plan to continue to clarify and expand these rules over time as new circumstances -arise. +arise. If your problem is not described below, consider [sending us an email]. # Package Ownership @@ -45,8 +45,9 @@ - We will not be pro-actively monitoring the site for these kinds of violations, but relying on the community to draw them to our attention. -- “Does this violate the Code of Conduct” is a contextual question that - cannot be directly answered in the hypothetical sense. All of the details +- “Does this violate the Code of Conduct” is a contextual question that + cannot be directly answered in the hypothetical sense. All of the details must be taken into consideration in these kinds of situations. 
[Code of Conduct]: https://www.rust-lang.org/conduct.html +[sending us an email]: mailto:help@crates.io diff -Nru cargo-0.17.0/src/doc/specifying-dependencies.md cargo-0.19.0/src/doc/specifying-dependencies.md --- cargo-0.17.0/src/doc/specifying-dependencies.md 2017-03-03 19:21:26.000000000 +0000 +++ cargo-0.19.0/src/doc/specifying-dependencies.md 2017-05-16 03:23:10.000000000 +0000 @@ -216,6 +216,10 @@ work locally with a `[replace]`, and then once the PR is merged and published you can remove `[replace]` and use the newly-published version. +Note: The `Cargo.lock` file will list two versions of the replaced crate: one +for the original crate, and one for the version specified in `[replace]`. +`cargo build -v` can verify that only one version is used in the build. + ### Overriding with local dependencies Sometimes you're only temporarily working on a crate and you don't want to have diff -Nru cargo-0.17.0/src/etc/_cargo cargo-0.19.0/src/etc/_cargo --- cargo-0.17.0/src/etc/_cargo 2017-03-03 19:21:26.000000000 +0000 +++ cargo-0.19.0/src/etc/_cargo 2017-05-16 03:23:10.000000000 +0000 @@ -3,6 +3,9 @@ typeset -A opt_args autoload -U regexp-replace +zstyle -T ':completion:*:*:cargo:*' tag-order && \ + zstyle ':completion:*:*:cargo:*' tag-order 'common-commands' + _cargo() { _arguments \ @@ -11,10 +14,14 @@ '(- 1 *)'{-v,--verbose}'[use verbose output]' \ '(- 1 *)'--color'[colorization option]' \ '(- 1 *)'{-V,--version}'[show version information]' \ - '1: :_cargo_cmds' \ + '1: :->command' \ '*:: :->args' case $state in + command) + _alternative 'common-commands:common:_cargo_cmds' 'all-commands:all:_cargo_all_cmds' + ;; + args) case $words[1] in bench) @@ -430,8 +437,12 @@ 'version:show version information' 'yank:remove pushed file from index' ) -_describe 'command' commands +_describe -t common-commands 'common commands' commands +} +_cargo_all_cmds(){ +local -a commands;commands=($(cargo --list)) +_describe -t all-commands 'all commands' commands } diff -Nru 
cargo-0.17.0/tests/bad-config.rs cargo-0.19.0/tests/bad-config.rs --- cargo-0.17.0/tests/bad-config.rs 2017-03-03 19:21:26.000000000 +0000 +++ cargo-0.19.0/tests/bad-config.rs 2017-05-16 03:23:10.000000000 +0000 @@ -206,12 +206,13 @@ [ERROR] Couldn't load Cargo configuration Caused by: - could not parse TOML configuration in `[..]config` + could not parse TOML configuration in `[..]` Caused by: could not parse input as TOML -[..]config:1:2 expected `=`, but found eof +Caused by: + expected an equals, found eof at line 1 ")); } @@ -232,7 +233,7 @@ [ERROR] failed to parse lock file at: [..]Cargo.lock Caused by: - expected a value of type `string` for the key `package.name` + missing field `name` for key `package` ")); } @@ -315,7 +316,7 @@ [ERROR] failed to parse lock file at: [..] Caused by: - invalid source `You shall not parse` for the key `package.source` + invalid source `You shall not parse` for key `package.source` ")); } @@ -421,8 +422,9 @@ Caused by: could not parse input as TOML -Cargo.toml:[..] +Caused by: + expected a table key, found a newline at line 8 ")); } @@ -945,3 +947,70 @@ error: more than one source URL specified for `source.foo` ")); } + +#[test] +fn bad_dependency() { + let p = project("foo") + .file("Cargo.toml", r#" + [package] + name = "foo" + version = "0.0.0" + authors = [] + + [dependencies] + bar = 3 + "#) + .file("src/lib.rs", ""); + + assert_that(p.cargo_process("build"), + execs().with_status(101).with_stderr("\ +error: failed to parse manifest at `[..]` + +Caused by: + invalid type: integer `3`, expected a version string like [..] 
+")); +} + +#[test] +fn bad_debuginfo() { + let p = project("foo") + .file("Cargo.toml", r#" + [package] + name = "foo" + version = "0.0.0" + authors = [] + + [profile.dev] + debug = 'a' + "#) + .file("src/lib.rs", ""); + + assert_that(p.cargo_process("build"), + execs().with_status(101).with_stderr("\ +error: failed to parse manifest at `[..]` + +Caused by: + invalid type: string \"a\", expected a boolean or an integer for [..] +")); +} + +#[test] +fn bad_opt_level() { + let p = project("foo") + .file("Cargo.toml", r#" + [package] + name = "foo" + version = "0.0.0" + authors = [] + build = 3 + "#) + .file("src/lib.rs", ""); + + assert_that(p.cargo_process("build"), + execs().with_status(101).with_stderr("\ +error: failed to parse manifest at `[..]` + +Caused by: + invalid type: integer `3`, expected a boolean or a string for key [..] +")); +} diff -Nru cargo-0.17.0/tests/bad-manifest-path.rs cargo-0.19.0/tests/bad-manifest-path.rs --- cargo-0.17.0/tests/bad-manifest-path.rs 2017-03-03 19:21:26.000000000 +0000 +++ cargo-0.19.0/tests/bad-manifest-path.rs 2017-05-16 03:23:10.000000000 +0000 @@ -21,7 +21,7 @@ fn assert_cargo_toml_doesnt_exist(command: &str, manifest_path_argument: &str) { let p = project("foo"); let expected_path = manifest_path_argument - .split("/").collect::>().join("[..]"); + .split('/').collect::>().join("[..]"); assert_that(p.cargo_process(command) .arg("--manifest-path").arg(manifest_path_argument) diff -Nru cargo-0.17.0/tests/bench.rs cargo-0.19.0/tests/bench.rs --- cargo-0.17.0/tests/bench.rs 2017-03-03 19:21:26.000000000 +0000 +++ cargo-0.19.0/tests/bench.rs 2017-05-16 03:23:10.000000000 +0000 @@ -243,17 +243,17 @@ execs().with_stderr(&format!("\ [COMPILING] foo v0.0.1 ({}) [FINISHED] release [optimized] target(s) in [..] 
-[RUNNING] target[/]release[/]deps[/]baz-[..][EXE] -[RUNNING] target[/]release[/]deps[/]foo-[..][EXE]", p.url())) +[RUNNING] target[/]release[/]deps[/]foo-[..][EXE] +[RUNNING] target[/]release[/]deps[/]baz-[..][EXE]", p.url())) .with_stdout(" running 1 test -test bin_bench ... bench: [..] 0 ns/iter (+/- 0) +test lib_bench ... bench: [..] 0 ns/iter (+/- 0) test result: ok. 0 passed; 0 failed; 0 ignored; 1 measured running 1 test -test lib_bench ... bench: [..] 0 ns/iter (+/- 0) +test bin_bench ... bench: [..] 0 ns/iter (+/- 0) test result: ok. 0 passed; 0 failed; 0 ignored; 1 measured @@ -353,17 +353,17 @@ execs().with_stderr(&format!("\ [COMPILING] foo v0.0.1 ({}) [FINISHED] release [optimized] target(s) in [..] -[RUNNING] target[/]release[/]deps[/]bench-[..][EXE] -[RUNNING] target[/]release[/]deps[/]foo-[..][EXE]", p.url())) +[RUNNING] target[/]release[/]deps[/]foo-[..][EXE] +[RUNNING] target[/]release[/]deps[/]bench-[..][EXE]", p.url())) .with_stdout(" running 1 test -test external_bench ... bench: [..] 0 ns/iter (+/- 0) +test internal_bench ... bench: [..] 0 ns/iter (+/- 0) test result: ok. 0 passed; 0 failed; 0 ignored; 1 measured running 1 test -test internal_bench ... bench: [..] 0 ns/iter (+/- 0) +test external_bench ... bench: [..] 0 ns/iter (+/- 0) test result: ok. 0 passed; 0 failed; 0 ignored; 1 measured @@ -403,17 +403,17 @@ execs().with_stderr(&format!("\ [COMPILING] foo v0.0.1 ({}) [FINISHED] release [optimized] target(s) in [..] -[RUNNING] target[/]release[/]deps[/]external-[..][EXE] -[RUNNING] target[/]release[/]deps[/]foo-[..][EXE]", p.url())) +[RUNNING] target[/]release[/]deps[/]foo-[..][EXE] +[RUNNING] target[/]release[/]deps[/]external-[..][EXE]", p.url())) .with_stdout(" running 1 test -test external_bench ... bench: [..] 0 ns/iter (+/- 0) +test internal_bench ... bench: [..] 0 ns/iter (+/- 0) test result: ok. 0 passed; 0 failed; 0 ignored; 1 measured running 1 test -test internal_bench ... bench: [..] 0 ns/iter (+/- 0) +test external_bench ... 
bench: [..] 0 ns/iter (+/- 0) test result: ok. 0 passed; 0 failed; 0 ignored; 1 measured @@ -600,17 +600,17 @@ .with_stderr(&format!("\ [COMPILING] syntax v0.0.1 ({dir}) [FINISHED] release [optimized] target(s) in [..] -[RUNNING] target[/]release[/]deps[/]bench-[..][EXE] -[RUNNING] target[/]release[/]deps[/]syntax-[..][EXE]", dir = p.url())) +[RUNNING] target[/]release[/]deps[/]syntax-[..][EXE] +[RUNNING] target[/]release[/]deps[/]bench-[..][EXE]", dir = p.url())) .with_stdout(" running 1 test -test bench ... bench: [..] 0 ns/iter (+/- 0) +test foo_bench ... bench: [..] 0 ns/iter (+/- 0) test result: ok. 0 passed; 0 failed; 0 ignored; 1 measured running 1 test -test foo_bench ... bench: [..] 0 ns/iter (+/- 0) +test bench ... bench: [..] 0 ns/iter (+/- 0) test result: ok. 0 passed; 0 failed; 0 ignored; 1 measured @@ -722,8 +722,8 @@ [RUNNING] [..] -C opt-level=3 [..] [RUNNING] [..] -C opt-level=3 [..] [FINISHED] release [optimized] target(s) in [..] -[RUNNING] `[..]target[/]release[/]deps[/]bench-[..][EXE] --bench` -[RUNNING] `[..]target[/]release[/]deps[/]foo-[..][EXE] --bench`", dir = p.url())) +[RUNNING] `[..]target[/]release[/]deps[/]foo-[..][EXE] --bench` +[RUNNING] `[..]target[/]release[/]deps[/]bench-[..][EXE] --bench`", dir = p.url())) .with_stdout(" running 1 test test foo ... bench: [..] 0 ns/iter (+/- 0) @@ -744,8 +744,8 @@ [FRESH] bar v0.0.1 ({dir}/bar) [FRESH] foo v0.0.1 ({dir}) [FINISHED] release [optimized] target(s) in [..] -[RUNNING] `[..]target[/]release[/]deps[/]bench-[..][EXE] --bench` -[RUNNING] `[..]target[/]release[/]deps[/]foo-[..][EXE] --bench`", dir = p.url())) +[RUNNING] `[..]target[/]release[/]deps[/]foo-[..][EXE] --bench` +[RUNNING] `[..]target[/]release[/]deps[/]bench-[..][EXE] --bench`", dir = p.url())) .with_stdout(" running 1 test test foo ... bench: [..] 0 ns/iter (+/- 0) @@ -871,18 +871,18 @@ [RUNNING] `rustc [..]` [RUNNING] `rustc [..]` [FINISHED] release [optimized] target(s) in [..] 
-[RUNNING] `{dir}[/]target[/]release[/]deps[/]testb1-[..][EXE] --bench` -[RUNNING] `{dir}[/]target[/]release[/]deps[/]testbench-[..][EXE] --bench`", +[RUNNING] `{dir}[/]target[/]release[/]deps[/]testbench-[..][EXE] --bench` +[RUNNING] `{dir}[/]target[/]release[/]deps[/]testb1-[..][EXE] --bench`", dir = p.root().display(), url = p.url())) .with_stdout(" running 1 test -test bench_bench2 ... bench: [..] 0 ns/iter (+/- 0) +test bench_bench1 ... bench: [..] 0 ns/iter (+/- 0) test result: ok. 0 passed; 0 failed; 0 ignored; 1 measured running 1 test -test bench_bench1 ... bench: [..] 0 ns/iter (+/- 0) +test bench_bench2 ... bench: [..] 0 ns/iter (+/- 0) test result: ok. 0 passed; 0 failed; 0 ignored; 1 measured diff -Nru cargo-0.17.0/tests/build-auth.rs cargo-0.19.0/tests/build-auth.rs --- cargo-0.17.0/tests/build-auth.rs 2017-03-03 19:21:26.000000000 +0000 +++ cargo-0.19.0/tests/build-auth.rs 2017-05-16 03:23:10.000000000 +0000 @@ -203,7 +203,7 @@ ", addr = addr)) .with_stderr_contains("\ Caused by: - [[..]] Failed to start SSH session: Failed getting banner + [[..]] failed to start SSH session: Failed getting banner ")); t.join().ok().unwrap(); } diff -Nru cargo-0.17.0/tests/build.rs cargo-0.19.0/tests/build.rs --- cargo-0.17.0/tests/build.rs 2017-03-03 19:21:26.000000000 +0000 +++ cargo-0.19.0/tests/build.rs 2017-05-16 03:23:10.000000000 +0000 @@ -40,15 +40,16 @@ let p = project("foo") .file("Cargo.toml", &basic_bin_manifest("foo")) .file("src/foo.rs", &main_file(r#""i am foo""#, &[])); + p.build(); assert_that( - p.cargo_process("build").arg("-v").env("CARGO_INCREMENTAL", "1"), + p.cargo("build").arg("-v").env("CARGO_INCREMENTAL", "1"), execs().with_stderr_contains( "[RUNNING] `rustc [..] -Zincremental=[..][/]target[/]debug[/]incremental`\n") .with_status(0)); assert_that( - p.cargo_process("test").arg("-v").env("CARGO_INCREMENTAL", "1"), + p.cargo("test").arg("-v").env("CARGO_INCREMENTAL", "1"), execs().with_stderr_contains( "[RUNNING] `rustc [..] 
-Zincremental=[..][/]target[/]debug[/]incremental`\n") .with_status(0)); @@ -99,8 +100,9 @@ Caused by: could not parse input as TOML -Cargo.toml:3:19-3:20 expected a value +Caused by: + invalid number at line 3 ")) } @@ -123,8 +125,11 @@ [ERROR] failed to parse manifest at `[..]` Caused by: - could not parse input as TOML\n\ -src[/]Cargo.toml:1:5-1:6 expected a value\n\n")) + could not parse input as TOML + +Caused by: + invalid number at line 1 +")) } #[test] @@ -174,7 +179,7 @@ [ERROR] failed to parse manifest at `[..]` Caused by: - cannot parse '1.0' as a semver for the key `project.version` + cannot parse '1.0' as a semver for key `project.version` ")) } @@ -700,22 +705,23 @@ .file("examples/a.rs", r#" fn main() { println!("example"); } "#); + p.build(); - assert_that(p.cargo_process("build").arg("--bin").arg("bin.rs"), + assert_that(p.cargo("build").arg("--bin").arg("bin.rs"), execs().with_status(101).with_stderr("\ [ERROR] no bin target named `bin.rs`")); - assert_that(p.cargo_process("build").arg("--bin").arg("a.rs"), + assert_that(p.cargo("build").arg("--bin").arg("a.rs"), execs().with_status(101).with_stderr("\ [ERROR] no bin target named `a.rs` Did you mean `a`?")); - assert_that(p.cargo_process("build").arg("--example").arg("example.rs"), + assert_that(p.cargo("build").arg("--example").arg("example.rs"), execs().with_status(101).with_stderr("\ [ERROR] no example target named `example.rs`")); - assert_that(p.cargo_process("build").arg("--example").arg("a.rs"), + assert_that(p.cargo("build").arg("--example").arg("a.rs"), execs().with_status(101).with_stderr("\ [ERROR] no example target named `a.rs` @@ -754,9 +760,8 @@ assert_that(p.cargo("build"), execs().with_status(101).with_stderr("\ -[ERROR] no matching package named `bar` found (required by `foo`) +[ERROR] no matching version `= 0.0.1` found for package `bar` (required by `foo`) location searched: [..] 
-version required: = 0.0.1 versions found: 0.0.2 consider running `cargo update` to update a path dependency's locked version ")); @@ -814,9 +819,10 @@ crate_type = ["dylib"] "#) .file("bar/src/lib.rs", "// hello"); + p.build(); // No metadata on libbar since it's a dylib path dependency - assert_that(p.cargo_process("build").arg("-v"), + assert_that(p.cargo("build").arg("-v"), execs().with_status(0).with_stderr(&format!("\ [COMPILING] bar v0.0.1 ({url}/bar) [RUNNING] `rustc --crate-name bar bar[/]src[/]lib.rs --crate-type dylib \ @@ -840,10 +846,10 @@ suffix = env::consts::DLL_SUFFIX, ))); - assert_that(p.cargo_process("clean"), execs().with_status(0)); + assert_that(p.cargo("clean"), execs().with_status(0)); // If you set the env-var, then we expect metadata on libbar - assert_that(p.cargo_process("build").arg("-v").env("__CARGO_DEFAULT_LIB_METADATA", "1"), + assert_that(p.cargo("build").arg("-v").env("__CARGO_DEFAULT_LIB_METADATA", "1"), execs().with_status(0).with_stderr(&format!("\ [COMPILING] bar v0.0.1 ({url}/bar) [RUNNING] `rustc --crate-name bar bar[/]src[/]lib.rs --crate-type dylib \ @@ -873,39 +879,39 @@ fn crate_env_vars() { let p = project("foo") .file("Cargo.toml", r#" - [project] - name = "foo" - version = "0.5.1-alpha.1" - description = "This is foo" - homepage = "http://example.com" - authors = ["wycats@example.com"] + [project] + name = "foo" + version = "0.5.1-alpha.1" + description = "This is foo" + homepage = "http://example.com" + authors = ["wycats@example.com"] "#) .file("src/main.rs", r#" extern crate foo; - static VERSION_MAJOR: &'static str = env!("CARGO_PKG_VERSION_MAJOR"); - static VERSION_MINOR: &'static str = env!("CARGO_PKG_VERSION_MINOR"); - static VERSION_PATCH: &'static str = env!("CARGO_PKG_VERSION_PATCH"); - static VERSION_PRE: &'static str = env!("CARGO_PKG_VERSION_PRE"); - static VERSION: &'static str = env!("CARGO_PKG_VERSION"); - static CARGO_MANIFEST_DIR: &'static str = env!("CARGO_MANIFEST_DIR"); - static PKG_NAME: 
&'static str = env!("CARGO_PKG_NAME"); - static HOMEPAGE: &'static str = env!("CARGO_PKG_HOMEPAGE"); - static DESCRIPTION: &'static str = env!("CARGO_PKG_DESCRIPTION"); + static VERSION_MAJOR: &'static str = env!("CARGO_PKG_VERSION_MAJOR"); + static VERSION_MINOR: &'static str = env!("CARGO_PKG_VERSION_MINOR"); + static VERSION_PATCH: &'static str = env!("CARGO_PKG_VERSION_PATCH"); + static VERSION_PRE: &'static str = env!("CARGO_PKG_VERSION_PRE"); + static VERSION: &'static str = env!("CARGO_PKG_VERSION"); + static CARGO_MANIFEST_DIR: &'static str = env!("CARGO_MANIFEST_DIR"); + static PKG_NAME: &'static str = env!("CARGO_PKG_NAME"); + static HOMEPAGE: &'static str = env!("CARGO_PKG_HOMEPAGE"); + static DESCRIPTION: &'static str = env!("CARGO_PKG_DESCRIPTION"); fn main() { let s = format!("{}-{}-{} @ {} in {}", VERSION_MAJOR, VERSION_MINOR, VERSION_PATCH, VERSION_PRE, CARGO_MANIFEST_DIR); - assert_eq!(s, foo::version()); - println!("{}", s); - assert_eq!("foo", PKG_NAME); - assert_eq!("http://example.com", HOMEPAGE); - assert_eq!("This is foo", DESCRIPTION); + assert_eq!(s, foo::version()); + println!("{}", s); + assert_eq!("foo", PKG_NAME); + assert_eq!("http://example.com", HOMEPAGE); + assert_eq!("This is foo", DESCRIPTION); let s = format!("{}.{}.{}-{}", VERSION_MAJOR, VERSION_MINOR, VERSION_PATCH, VERSION_PRE); - assert_eq!(s, VERSION); + assert_eq!(s, VERSION); } "#) .file("src/lib.rs", r#" @@ -1447,15 +1453,16 @@ authors = [] "#) .file("bar/src/lib.rs", ""); + p.build(); - assert_that(p.cargo_process("build"), execs().with_status(0)); - let p = p.file("Cargo.toml", r#" - [package] - name = "foo" - version = "0.0.1" - authors = [] - "#); - assert_that(p.cargo_process("build"), execs().with_status(101)); + assert_that(p.cargo("build"), execs().with_status(0)); + p.change_file("Cargo.toml", r#" + [package] + name = "foo" + version = "0.0.1" + authors = [] + "#); + assert_that(p.cargo("build"), execs().with_status(101)); } #[test] @@ -1751,8 +1758,9 @@ Caused 
by: could not parse input as TOML -[..].cargo[..]config:2:20-2:21 expected `=`, but found `i` +Caused by: + expected an equals, found an identifier at line 2 ")); } @@ -2356,7 +2364,6 @@ doctest = false "#) .file("d2/src/main.rs", "fn main() { println!(\"d2\"); }"); - p.build(); assert_that(p.cargo_process("build").arg("-p").arg("d1").arg("-p").arg("d2") .arg("-p").arg("foo"), @@ -2408,12 +2415,12 @@ .file("d1/src/main.rs", "fn main() { println!(\"d1\"); }"); p.build(); - assert_that(p.cargo_process("build").arg("-p").arg("notAValidDep"), + assert_that(p.cargo("build").arg("-p").arg("notAValidDep"), execs().with_status(101).with_stderr("\ [ERROR] package id specification `notAValidDep` matched no packages ")); - assert_that(p.cargo_process("build").arg("-p").arg("d1").arg("-p").arg("notAValidDep"), + assert_that(p.cargo("build").arg("-p").arg("d1").arg("-p").arg("notAValidDep"), execs().with_status(101).with_stderr("\ [ERROR] package id specification `notAValidDep` matched no packages ")); @@ -2435,9 +2442,6 @@ #[test] fn panic_abort_compiles_with_panic_abort() { - if !is_nightly() { - return - } let p = project("foo") .file("Cargo.toml", r#" [package] @@ -2456,22 +2460,23 @@ #[test] fn explicit_color_config_is_propagated_to_rustc() { - let mut p = project("foo"); - p = p - .file("Cargo.toml", r#" - [package] - - name = "test" - version = "0.0.0" - authors = [] - "#) - .file("src/lib.rs", ""); + let p = project("foo") + .file("Cargo.toml", r#" + [package] - assert_that(p.cargo_process("build").arg("-v").arg("--color").arg("always"), + name = "test" + version = "0.0.0" + authors = [] + "#) + .file("src/lib.rs", ""); + p.build(); + assert_that(p.cargo("build").arg("-v").arg("--color").arg("always"), execs().with_status(0).with_stderr_contains( "[..]rustc [..] 
src[/]lib.rs --color always[..]")); - assert_that(p.cargo_process("build").arg("-v").arg("--color").arg("never"), + assert_that(p.cargo("clean"), execs().with_status(0)); + + assert_that(p.cargo("build").arg("-v").arg("--color").arg("never"), execs().with_status(0).with_stderr("\ [COMPILING] test v0.0.0 ([..]) [RUNNING] `rustc [..] --color never [..]` @@ -2481,8 +2486,6 @@ #[test] fn compiler_json_error_format() { - if !is_nightly() { return } - let p = project("foo") .file("Cargo.toml", r#" [project] @@ -2503,14 +2506,20 @@ authors = ["wycats@example.com"] "#) .file("bar/src/lib.rs", r#"fn dead() {}"#); + p.build(); - assert_that(p.cargo_process("build").arg("-v") + assert_that(p.cargo("build").arg("-v") .arg("--message-format").arg("json"), execs().with_status(0).with_json(r#" { "reason":"compiler-message", "package_id":"bar 0.5.0 ([..])", - "target":{"kind":["lib"],"name":"bar","src_path":"[..]lib.rs"}, + "target":{ + "kind":["lib"], + "crate_types":["lib"], + "name":"bar", + "src_path":"[..]lib.rs" + }, "message":"{...}" } @@ -2520,33 +2529,100 @@ "debug_assertions": true, "debuginfo": 2, "opt_level": "0", + "overflow_checks": true, "test": false }, "features": [], "package_id":"bar 0.5.0 ([..])", - "target":{"kind":["lib"],"name":"bar","src_path":"[..]lib.rs"}, - "filenames":["[..].rlib"] + "target":{ + "kind":["lib"], + "crate_types":["lib"], + "name":"bar", + "src_path":"[..]lib.rs" + }, + "filenames":["[..].rlib"], + "fresh": false } { "reason":"compiler-message", "package_id":"foo 0.5.0 ([..])", - "target":{"kind":["bin"],"name":"foo","src_path":"[..]main.rs"}, + "target":{ + "kind":["bin"], + "crate_types":["bin"], + "name":"foo", + "src_path":"[..]main.rs" + }, "message":"{...}" } { "reason":"compiler-artifact", "package_id":"foo 0.5.0 ([..])", - "target":{"kind":["bin"],"name":"foo","src_path":"[..]main.rs"}, + "target":{ + "kind":["bin"], + "crate_types":["bin"], + "name":"foo", + "src_path":"[..]main.rs" + }, "profile": { "debug_assertions": true, 
"debuginfo": 2, "opt_level": "0", + "overflow_checks": true, "test": false }, "features": [], - "filenames": ["[..]"] + "filenames": ["[..]"], + "fresh": false + } +"#)); + + // With fresh build, we should repeat the artifacts, + // but omit compiler warnings. + assert_that(p.cargo("build").arg("-v") + .arg("--message-format").arg("json"), + execs().with_status(0).with_json(r#" + { + "reason":"compiler-artifact", + "profile": { + "debug_assertions": true, + "debuginfo": 2, + "opt_level": "0", + "overflow_checks": true, + "test": false + }, + "features": [], + "package_id":"bar 0.5.0 ([..])", + "target":{ + "kind":["lib"], + "crate_types":["lib"], + "name":"bar", + "src_path":"[..]lib.rs" + }, + "filenames":["[..].rlib"], + "fresh": true + } + + { + "reason":"compiler-artifact", + "package_id":"foo 0.5.0 ([..])", + "target":{ + "kind":["bin"], + "crate_types":["bin"], + "name":"foo", + "src_path":"[..]main.rs" + }, + "profile": { + "debug_assertions": true, + "debuginfo": 2, + "opt_level": "0", + "overflow_checks": true, + "test": false + }, + "features": [], + "filenames": ["[..]"], + "fresh": true } "#)); } @@ -2556,7 +2632,6 @@ let p = project("foo") .file("Cargo.toml", &basic_bin_manifest("foo")) .file("src/main.rs", "fn main() {}"); - p.build(); assert_that(p.cargo_process("build").arg("--message-format").arg("XML"), execs().with_status(1) @@ -2580,22 +2655,34 @@ { "reason":"compiler-message", "package_id":"foo 0.5.0 ([..])", - "target":{"kind":["bin"],"name":"foo","src_path":"[..]"}, + "target":{ + "kind":["bin"], + "crate_types":["bin"], + "name":"foo", + "src_path":"[..]" + }, "message":"{...}" } { "reason":"compiler-artifact", "package_id":"foo 0.5.0 ([..])", - "target":{"kind":["bin"],"name":"foo","src_path":"[..]"}, + "target":{ + "kind":["bin"], + "crate_types":["bin"], + "name":"foo", + "src_path":"[..]" + }, "profile":{ "debug_assertions":true, "debuginfo":2, "opt_level":"0", + "overflow_checks": true, "test":false }, "features":[], - 
"filenames":["[..]"] + "filenames":[], + "fresh": false } "#)); } @@ -2629,7 +2716,6 @@ let p = project("foo") .file("Cargo.toml", &basic_bin_manifest("foo")) .file("src/main.rs", "fn main() {}"); - p.build(); assert_that(p.cargo_process("build").arg("--target").arg(""), execs().with_status(101) @@ -2660,7 +2746,6 @@ .file("bar/src/lib.rs", r#" pub fn bar() {} "#); - p.build(); assert_that(p.cargo_process("build") .arg("--all"), @@ -2693,7 +2778,6 @@ .file("bar/src/lib.rs", r#" pub fn bar() {} "#); - p.build(); // The order in which foo and bar are built is not guaranteed assert_that(p.cargo_process("build") @@ -2724,7 +2808,6 @@ .file("a/src/lib.rs", r#" pub fn a() {} "#); - p.build(); Package::new("a", "0.1.0").publish(); @@ -2738,3 +2821,153 @@ [..] Finished dev [unoptimized + debuginfo] target(s) in [..]\n")); } +#[test] +fn run_proper_binary() { + let p = project("foo") + .file("Cargo.toml", r#" + [package] + name = "foo" + authors = [] + version = "0.0.0" + [[bin]] + name = "main" + [[bin]] + name = "other" + "#) + .file("src/lib.rs", "") + .file("src/bin/main.rs", r#" + fn main() { + panic!("This should never be run."); + } + "#) + .file("src/bin/other.rs", r#" + fn main() { + } + "#); + + assert_that(p.cargo_process("run").arg("--bin").arg("other"), + execs().with_status(0)); +} + +#[test] +fn run_proper_binary_main_rs() { + let p = project("foo") + .file("Cargo.toml", r#" + [package] + name = "foo" + authors = [] + version = "0.0.0" + [[bin]] + name = "foo" + "#) + .file("src/lib.rs", "") + .file("src/bin/main.rs", r#" + fn main() { + } + "#); + + assert_that(p.cargo_process("run").arg("--bin").arg("foo"), + execs().with_status(0)); +} + +#[test] +fn run_proper_alias_binary_from_src() { + let p = project("foo") + .file("Cargo.toml", r#" + [package] + name = "foo" + authors = [] + version = "0.0.0" + [[bin]] + name = "foo" + [[bin]] + name = "bar" + "#) + .file("src/foo.rs", r#" + fn main() { + println!("foo"); + } + "#).file("src/bar.rs", r#" + fn main() { 
+ println!("bar"); + } + "#); + + assert_that(p.cargo_process("build") + .arg("--all"), + execs().with_status(0) + ); + assert_that(process(&p.bin("foo")), + execs().with_status(0).with_stdout("foo\n")); + assert_that(process(&p.bin("bar")), + execs().with_status(0).with_stdout("bar\n")); +} + +#[test] +fn run_proper_alias_binary_main_rs() { + let p = project("foo") + .file("Cargo.toml", r#" + [package] + name = "foo" + authors = [] + version = "0.0.0" + [[bin]] + name = "foo" + [[bin]] + name = "bar" + "#) + .file("src/main.rs", r#" + fn main() { + println!("main"); + } + "#); + + assert_that(p.cargo_process("build") + .arg("--all"), + execs().with_status(0) + ); + assert_that(process(&p.bin("foo")), + execs().with_status(0).with_stdout("main\n")); + assert_that(process(&p.bin("bar")), + execs().with_status(0).with_stdout("main\n")); +} + +#[test] +fn run_proper_binary_main_rs_as_foo() { + let p = project("foo") + .file("Cargo.toml", r#" + [package] + name = "foo" + authors = [] + version = "0.0.0" + [[bin]] + name = "foo" + "#) + .file("src/foo.rs", r#" + fn main() { + panic!("This should never be run."); + } + "#) + .file("src/main.rs", r#" + fn main() { + } + "#); + + assert_that(p.cargo_process("run").arg("--bin").arg("foo"), + execs().with_status(0)); +} + +#[test] +fn rustc_wrapper() { + // We don't have /usr/bin/env on Windows. 
+ if cfg!(windows) { return } + + let p = project("foo") + .file("Cargo.toml", &basic_bin_manifest("foo")) + .file("src/foo.rs", &main_file(r#""i am foo""#, &[])); + + assert_that(p.cargo_process("build").arg("-v").env("RUSTC_WRAPPER", "/usr/bin/env"), + execs().with_stderr_contains( + "[RUNNING] `/usr/bin/env rustc --crate-name foo [..]") + .with_status(0)); +} diff -Nru cargo-0.17.0/tests/build-script.rs cargo-0.19.0/tests/build-script.rs --- cargo-0.17.0/tests/build-script.rs 2017-03-03 19:21:26.000000000 +0000 +++ cargo-0.19.0/tests/build-script.rs 2017-05-16 03:23:10.000000000 +0000 @@ -4,7 +4,7 @@ use std::fs::{self, File}; use std::io::prelude::*; -use cargotest::{rustc_host, is_nightly, sleep_ms}; +use cargotest::{rustc_host, sleep_ms}; use cargotest::support::{project, execs}; use cargotest::support::paths::CargoPathExt; use cargotest::support::registry::Package; @@ -306,7 +306,7 @@ [..] [..] [..] -[RUNNING] `rustc --crate-name foo [..] -L foo -L bar[..]` +[RUNNING] `rustc --crate-name foo [..] -L foo -L bar` [FINISHED] dev [unoptimized + debuginfo] target(s) in [..] ")); } @@ -926,7 +926,9 @@ ").unwrap(); } "#); - assert_that(p.cargo_process("run"), + p.build(); + + assert_that(p.cargo("run"), execs().with_status(0) .with_stderr("\ [COMPILING] foo v0.5.0 (file://[..]) @@ -936,7 +938,7 @@ Hello, World! ")); - assert_that(p.cargo_process("test"), + assert_that(p.cargo("test"), execs().with_status(0)); } @@ -1781,6 +1783,87 @@ ")); } + +#[test] +fn fresh_builds_possible_with_link_libs() { + // The bug is non-deterministic. 
Sometimes you can get a fresh build + let target = rustc_host(); + let p = project("foo") + .file("Cargo.toml", r#" + [project] + name = "foo" + version = "0.5.0" + authors = [] + links = "nativefoo" + build = "build.rs" + "#) + .file("src/lib.rs", "") + .file(".cargo/config", &format!(" + [target.{}.nativefoo] + rustc-link-lib = [\"a\"] + rustc-link-search = [\"./b\"] + rustc-flags = \"-l z -L ./\" + ", target)) + .file("build.rs", ""); + + assert_that(p.cargo_process("build").arg("-v"), + execs().with_status(0).with_stderr("\ +[COMPILING] foo v0.5.0 ([..] +[RUNNING] `rustc [..]` +[FINISHED] dev [unoptimized + debuginfo] target(s) in [..] +")); + + assert_that(p.cargo("build") + .arg("-v") + .env("RUST_LOG", "cargo::ops::cargo_rustc::fingerprint=info"), + execs().with_status(0).with_stderr("\ +[FRESH] foo v0.5.0 ([..]) +[FINISHED] dev [unoptimized + debuginfo] target(s) in [..] +")); +} + + +#[test] +fn fresh_builds_possible_with_multiple_metadata_overrides() { + // The bug is non-deterministic. Sometimes you can get a fresh build + let target = rustc_host(); + let p = project("foo") + .file("Cargo.toml", r#" + [project] + name = "foo" + version = "0.5.0" + authors = [] + links = "foo" + build = "build.rs" + "#) + .file("src/lib.rs", "") + .file(".cargo/config", &format!(" + [target.{}.foo] + a = \"\" + b = \"\" + c = \"\" + d = \"\" + e = \"\" + ", target)) + .file("build.rs", ""); + + assert_that(p.cargo_process("build").arg("-v"), + execs().with_status(0).with_stderr("\ +[COMPILING] foo v0.5.0 ([..] +[RUNNING] `rustc [..]` +[FINISHED] dev [unoptimized + debuginfo] target(s) in [..] +")); + + assert_that(p.cargo("build") + .arg("-v") + .env("RUST_LOG", "cargo::ops::cargo_rustc::fingerprint=info"), + execs().with_status(0).with_stderr("\ +[FRESH] foo v0.5.0 ([..]) +[FINISHED] dev [unoptimized + debuginfo] target(s) in [..] 
+")); +} + + #[test] fn rebuild_only_on_explicit_paths() { let p = project("a") @@ -2011,9 +2094,6 @@ #[test] fn panic_abort_with_build_scripts() { - if !is_nightly() { - return - } let p = project("foo") .file("Cargo.toml", r#" [project] @@ -2350,8 +2430,8 @@ "#) .file("src/main.rs", r#" fn main() { - if cfg!(foo) { - panic!("the build script was run"); + if ! cfg!(foo) { + panic!("the build script was not run"); } } "#) @@ -2363,14 +2443,7 @@ p.build(); assert_that(p.cargo("run").arg("-v"), - execs().with_status(0).with_stderr("\ -warning: `build.rs` files in the same directory as your `Cargo.toml` will soon be treated \ -as build scripts. Add `build = false` to your `Cargo.toml` to prevent this - Compiling builder v0.0.1 ([..]) - Running [..] - Finished [..] - Running [..] -")); + execs().with_status(0)); } #[test] diff -Nru cargo-0.17.0/tests/cargo.rs cargo-0.19.0/tests/cargo.rs --- cargo-0.17.0/tests/cargo.rs 2017-03-03 19:21:26.000000000 +0000 +++ cargo-0.19.0/tests/cargo.rs 2017-05-16 03:23:10.000000000 +0000 @@ -1,3 +1,4 @@ +extern crate cargo; extern crate cargotest; extern crate hamcrest; @@ -10,8 +11,8 @@ use cargotest::cargo_process; use cargotest::support::paths::{self, CargoPathExt}; -use cargotest::support::{execs, project, ProjectBuilder}; -use hamcrest::{assert_that}; +use cargotest::support::{execs, project, ProjectBuilder, basic_bin_manifest}; +use hamcrest::{assert_that, existing_file}; #[cfg_attr(windows,allow(dead_code))] enum FakeKind<'a> { @@ -64,7 +65,7 @@ #[test] fn list_command_looks_at_path() { let proj = project("list-non-overlapping"); - let proj = fake_file(proj, &Path::new("path-test"), "cargo-1", FakeKind::Executable); + let proj = fake_file(proj, Path::new("path-test"), "cargo-1", FakeKind::Executable); let mut pr = cargo_process(); let mut path = path(); @@ -81,11 +82,11 @@ #[cfg(unix)] #[test] fn list_command_resolves_symlinks() { - use cargotest::support::cargo_dir; + use cargotest::support::cargo_exe; let proj = 
project("list-non-overlapping"); - let proj = fake_file(proj, &Path::new("path-test"), "cargo-2", - FakeKind::Symlink{target:&cargo_dir().join("cargo")}); + let proj = fake_file(proj, Path::new("path-test"), "cargo-2", + FakeKind::Symlink{target:&cargo_exe()}); let mut pr = cargo_process(); let mut path = path(); @@ -162,6 +163,37 @@ } #[test] +fn cargo_subcommand_env() { + use cargotest::support::cargo_exe; + + let src = format!(r#" + use std::env; + + fn main() {{ + println!("{{}}", env::var("{}").unwrap()); + }} + "#, cargo::CARGO_ENV); + + let p = project("cargo-envtest") + .file("Cargo.toml", &basic_bin_manifest("cargo-envtest")) + .file("src/main.rs", &src); + + let target_dir = p.target_debug_dir(); + + assert_that(p.cargo_process("build"), execs().with_status(0)); + assert_that(&p.bin("cargo-envtest"), existing_file()); + + let mut pr = cargo_process(); + let cargo = cargo_exe().canonicalize().unwrap(); + let mut path = path(); + path.push(target_dir); + let path = env::join_paths(path.iter()).unwrap(); + + assert_that(pr.arg("envtest").env("PATH", &path), + execs().with_status(0).with_stdout(cargo.to_str().unwrap())); +} + +#[test] fn cargo_help() { assert_that(cargo_process(), execs().with_status(0)); diff -Nru cargo-0.17.0/tests/cargotest/Cargo.toml cargo-0.19.0/tests/cargotest/Cargo.toml --- cargo-0.17.0/tests/cargotest/Cargo.toml 2017-03-03 19:21:26.000000000 +0000 +++ cargo-0.19.0/tests/cargotest/Cargo.toml 2017-05-16 03:23:10.000000000 +0000 @@ -12,11 +12,13 @@ filetime = "0.1" flate2 = "0.2" git2 = { version = "0.6", default-features = false } -hamcrest = "0.1" +hamcrest = "=0.1.1" kernel32-sys = "0.2" libc = "0.2" log = "0.3" rustc-serialize = "0.3" +serde = "0.9" +serde_json = "0.9" tar = { version = "0.4", default-features = false } tempdir = "0.3" term = "0.4.4" diff -Nru cargo-0.17.0/tests/cargotest/lib.rs cargo-0.19.0/tests/cargotest/lib.rs --- cargo-0.17.0/tests/cargotest/lib.rs 2017-03-03 19:21:26.000000000 +0000 +++ 
cargo-0.19.0/tests/cargotest/lib.rs 2017-05-16 03:23:10.000000000 +0000 @@ -8,6 +8,9 @@ extern crate hamcrest; extern crate libc; extern crate rustc_serialize; +extern crate serde; +#[macro_use] +extern crate serde_json; extern crate tar; extern crate tempdir; extern crate term; @@ -24,7 +27,7 @@ pub mod support; pub mod install; -thread_local!(pub static RUSTC: Rustc = Rustc::new(PathBuf::from("rustc")).unwrap()); +thread_local!(pub static RUSTC: Rustc = Rustc::new(PathBuf::from("rustc"), None).unwrap()); pub fn rustc_host() -> String { RUSTC.with(|r| r.host.clone()) @@ -47,10 +50,20 @@ .env_remove("CARGO_HOME") .env("HOME", support::paths::home()) .env("CARGO_HOME", support::paths::home().join(".cargo")) + .env("__CARGO_TEST_ROOT", support::paths::root()) + .env_remove("__CARGO_DEFAULT_LIB_METADATA") .env_remove("RUSTC") + .env_remove("RUSTDOC") + .env_remove("RUSTC_WRAPPER") .env_remove("RUSTFLAGS") + .env_remove("CARGO_INCREMENTAL") .env_remove("XDG_CONFIG_HOME") // see #2345 .env("GIT_CONFIG_NOSYSTEM", "1") // keep trying to sandbox ourselves + .env_remove("EMAIL") + .env_remove("GIT_AUTHOR_NAME") + .env_remove("GIT_AUTHOR_EMAIL") + .env_remove("GIT_COMMITTER_NAME") + .env_remove("GIT_COMMITTER_EMAIL") .env_remove("CARGO_TARGET_DIR") // we assume 'target' .env_remove("MSYSTEM"); // assume cmd.exe everywhere on windows @@ -86,7 +99,7 @@ } pub fn cargo_process() -> cargo::util::ProcessBuilder { - process(&support::cargo_dir().join("cargo")) + process(&support::cargo_exe()) } pub fn sleep_ms(ms: u64) { diff -Nru cargo-0.17.0/tests/cargotest/support/mod.rs cargo-0.19.0/tests/cargotest/support/mod.rs --- cargo-0.17.0/tests/cargotest/support/mod.rs 2017-03-03 19:21:26.000000000 +0000 +++ cargo-0.19.0/tests/cargotest/support/mod.rs 2017-05-16 03:23:10.000000000 +0000 @@ -1,3 +1,4 @@ +use std::cell::Cell; use std::env; use std::error::Error; use std::ffi::OsStr; @@ -10,7 +11,7 @@ use std::str; use std::usize; -use rustc_serialize::json::Json; +use serde_json::{self, 
Value}; use url::Url; use hamcrest as ham; use cargo::util::ProcessBuilder; @@ -95,7 +96,8 @@ name: String, root: PathBuf, files: Vec, - symlinks: Vec + symlinks: Vec, + is_build: Cell, } impl ProjectBuilder { @@ -104,7 +106,8 @@ name: name.to_string(), root: root, files: vec![], - symlinks: vec![] + symlinks: vec![], + is_build: Cell::new(false), } } @@ -159,7 +162,10 @@ } pub fn cargo(&self, cmd: &str) -> ProcessBuilder { - let mut p = self.process(&cargo_dir().join("cargo")); + assert!(self.is_build.get(), + "call `.build()` before calling `.cargo()`, \ + or use `.cargo_process()`"); + let mut p = self.process(&cargo_exe()); p.arg(cmd); return p; } @@ -175,6 +181,11 @@ self } + pub fn change_file(&self, path: &str, body: &str) { + assert!(self.is_build.get()); + FileBuilder::new(self.root.join(path), body).mk() + } + pub fn symlink>(mut self, dst: T, src: T) -> ProjectBuilder { self.symlinks.push(SymlinkBuilder::new(self.root.join(dst), @@ -184,6 +195,9 @@ // TODO: return something different than a ProjectBuilder pub fn build(&self) -> &ProjectBuilder { + assert!(!self.is_build.get(), + "can `.build()` project only once"); + self.is_build.set(true); // First, clean the directory if it already exists self.rm_root(); @@ -303,6 +317,10 @@ }) } +pub fn cargo_exe() -> PathBuf { + cargo_dir().join(format!("cargo{}", env::consts::EXE_SUFFIX)) +} + /// Returns an absolute path in the filesystem that `path` points to. The /// returned path does not contain any symlinks in its hierarchy. 
/* @@ -321,7 +339,7 @@ expect_stderr_contains: Vec, expect_stdout_not_contains: Vec, expect_stderr_not_contains: Vec, - expect_json: Option>, + expect_json: Option>, } impl Execs { @@ -362,7 +380,7 @@ pub fn with_json(mut self, expected: &str) -> Execs { self.expect_json = Some(expected.split("\n\n").map(|obj| { - Json::from_str(obj).unwrap() + obj.parse().unwrap() }).collect()); self } @@ -486,8 +504,8 @@ } } - fn match_json(&self, expected: &Json, line: &str) -> ham::MatchResult { - let actual = match Json::from_str(line) { + fn match_json(&self, expected: &Value, line: &str) -> ham::MatchResult { + let actual = match line.parse() { Err(e) => return Err(format!("invalid json, {}:\n`{}`", e, line)), Ok(actual) => actual, }; @@ -495,8 +513,10 @@ match find_mismatch(expected, &actual) { Some((expected_part, actual_part)) => Err(format!( "JSON mismatch\nExpected:\n{}\nWas:\n{}\nExpected part:\n{}\nActual part:\n{}\n", - expected.pretty(), actual.pretty(), - expected_part.pretty(), actual_part.pretty() + serde_json::to_string_pretty(expected).unwrap(), + serde_json::to_string_pretty(&actual).unwrap(), + serde_json::to_string_pretty(expected_part).unwrap(), + serde_json::to_string_pretty(actual_part).unwrap(), )), None => Ok(()), } @@ -569,32 +589,42 @@ } // Compares JSON object for approximate equality. -// You can use `[..]` wildcard in strings (useful for OS dependent things such as paths). -// You can use a `"{...}"` string literal as a wildcard for arbitrary nested JSON (useful -// for parts of object emitted by other programs (e.g. rustc) rather than Cargo itself). -// Arrays are sorted before comparison. -fn find_mismatch<'a>(expected: &'a Json, actual: &'a Json) -> Option<(&'a Json, &'a Json)> { - use rustc_serialize::json::Json::*; +// You can use `[..]` wildcard in strings (useful for OS dependent things such +// as paths). 
You can use a `"{...}"` string literal as a wildcard for +// arbitrary nested JSON (useful for parts of object emitted by other programs +// (e.g. rustc) rather than Cargo itself). Arrays are sorted before comparison. +fn find_mismatch<'a>(expected: &'a Value, actual: &'a Value) + -> Option<(&'a Value, &'a Value)> { + use serde_json::Value::*; match (expected, actual) { - (&I64(l), &I64(r)) if l == r => None, - (&F64(l), &F64(r)) if l == r => None, - (&U64(l), &U64(r)) if l == r => None, - (&Boolean(l), &Boolean(r)) if l == r => None, + (&Number(ref l), &Number(ref r)) if l == r => None, + (&Bool(l), &Bool(r)) if l == r => None, (&String(ref l), &String(ref r)) if lines_match(l, r) => None, (&Array(ref l), &Array(ref r)) => { if l.len() != r.len() { return Some((expected, actual)); } - fn sorted(xs: &Vec) -> Vec<&Json> { - let mut result = xs.iter().collect::>(); - result.sort_by(|x, y| x.partial_cmp(y).expect("JSON spec does not allow NaNs")); - result - } + let mut l = l.iter().collect::>(); + let mut r = r.iter().collect::>(); - sorted(l).iter().zip(sorted(r)) - .filter_map(|(l, r)| find_mismatch(l, r)) - .nth(0) + l.retain(|l| { + match r.iter().position(|r| find_mismatch(l, r).is_none()) { + Some(i) => { + r.remove(i); + false + } + None => true + } + }); + + if l.len() > 0 { + assert!(r.len() > 0); + Some((&l[0], &r[0])) + } else { + assert!(r.len() == 0); + None + } } (&Object(ref l), &Object(ref r)) => { let same_keys = l.len() == r.len() && l.keys().all(|k| r.contains_key(k)); diff -Nru cargo-0.17.0/tests/cargotest/support/registry.rs cargo-0.19.0/tests/cargotest/support/registry.rs --- cargo-0.17.0/tests/cargotest/support/registry.rs 2017-03-03 19:21:26.000000000 +0000 +++ cargo-0.19.0/tests/cargotest/support/registry.rs 2017-05-16 03:23:10.000000000 +0000 @@ -7,7 +7,6 @@ use flate2::write::GzEncoder; use git2; use rustc_serialize::hex::ToHex; -use rustc_serialize::json::ToJson; use tar::{Builder, Header}; use url::Url; @@ -137,29 +136,29 @@ // Figure out 
what we're going to write into the index let deps = self.deps.iter().map(|dep| { - let mut map = HashMap::new(); - map.insert("name".to_string(), dep.name.to_json()); - map.insert("req".to_string(), dep.vers.to_json()); - map.insert("features".to_string(), dep.features.to_json()); - map.insert("default_features".to_string(), true.to_json()); - map.insert("target".to_string(), dep.target.to_json()); - map.insert("optional".to_string(), false.to_json()); - map.insert("kind".to_string(), dep.kind.to_json()); - map + json!({ + "name": dep.name, + "req": dep.vers, + "features": dep.features, + "default_features": true, + "target": dep.target, + "optional": false, + "kind": dep.kind, + }) }).collect::>(); let cksum = { let mut c = Vec::new(); t!(t!(File::open(&self.archive_dst())).read_to_end(&mut c)); cksum(&c) }; - let mut dep = HashMap::new(); - dep.insert("name".to_string(), self.name.to_json()); - dep.insert("vers".to_string(), self.vers.to_json()); - dep.insert("deps".to_string(), deps.to_json()); - dep.insert("cksum".to_string(), cksum.to_json()); - dep.insert("features".to_string(), self.features.to_json()); - dep.insert("yanked".to_string(), self.yanked.to_json()); - let line = dep.to_json().to_string(); + let line = json!({ + "name": self.name, + "vers": self.vers, + "deps": deps, + "cksum": cksum, + "features": self.features, + "yanked": self.yanked, + }).to_string(); let file = match self.name.len() { 1 => format!("1/{}", self.name), diff -Nru cargo-0.17.0/tests/cfg.rs cargo-0.19.0/tests/cfg.rs --- cargo-0.17.0/tests/cfg.rs 2017-03-03 19:21:26.000000000 +0000 +++ cargo-0.19.0/tests/cfg.rs 2017-05-16 03:23:10.000000000 +0000 @@ -6,7 +6,7 @@ use std::fmt; use cargo::util::{Cfg, CfgExpr}; -use cargotest::{is_nightly, rustc_host}; +use cargotest::rustc_host; use cargotest::support::registry::Package; use cargotest::support::{project, execs}; use hamcrest::assert_that; @@ -138,8 +138,6 @@ #[test] fn cfg_easy() { - if !is_nightly() { return } - let p = 
project("foo") .file("Cargo.toml", r#" [package] @@ -166,8 +164,6 @@ #[test] fn dont_include() { - if !is_nightly() { return } - let other_family = if cfg!(unix) {"windows"} else {"unix"}; let p = project("foo") .file("Cargo.toml", &format!(r#" @@ -196,8 +192,6 @@ #[test] fn works_through_the_registry() { - if !is_nightly() { return } - Package::new("foo", "0.1.0").publish(); Package::new("bar", "0.1.0") .target_dep("foo", "0.1.0", "cfg(unix)") @@ -205,7 +199,7 @@ .publish(); let p = project("a") - .file("Cargo.toml", &r#" + .file("Cargo.toml", r#" [package] name = "a" version = "0.0.1" @@ -263,7 +257,7 @@ #[test] fn bad_target_spec() { let p = project("a") - .file("Cargo.toml", &r#" + .file("Cargo.toml", r#" [package] name = "a" version = "0.0.1" @@ -289,7 +283,7 @@ #[test] fn bad_target_spec2() { let p = project("a") - .file("Cargo.toml", &r#" + .file("Cargo.toml", r#" [package] name = "a" version = "0.0.1" @@ -314,8 +308,6 @@ #[test] fn multiple_match_ok() { - if !is_nightly() { return } - let p = project("foo") .file("Cargo.toml", &format!(r#" [package] @@ -351,8 +343,6 @@ #[test] fn any_ok() { - if !is_nightly() { return } - let p = project("foo") .file("Cargo.toml", r#" [package] diff -Nru cargo-0.17.0/tests/check.rs cargo-0.19.0/tests/check.rs --- cargo-0.17.0/tests/check.rs 2017-03-03 19:21:26.000000000 +0000 +++ cargo-0.19.0/tests/check.rs 2017-05-16 03:23:10.000000000 +0000 @@ -8,9 +8,6 @@ #[test] fn check_success() { - if !is_nightly() { - return - } let foo = project("foo") .file("Cargo.toml", r#" [package] @@ -45,9 +42,6 @@ #[test] fn check_fail() { - if !is_nightly() { - return - } let foo = project("foo") .file("Cargo.toml", r#" [package] @@ -143,10 +137,6 @@ #[test] fn check_build() { - if !is_nightly() { - return; - } - let foo = project("foo") .file("Cargo.toml", r#" [package] @@ -163,6 +153,8 @@ ::bar::baz(); } "#); + foo.build(); + let bar = project("bar") .file("Cargo.toml", r#" [package] @@ -175,18 +167,14 @@ "#); bar.build(); - 
assert_that(foo.cargo_process("check"), + assert_that(foo.cargo("check"), execs().with_status(0)); - assert_that(foo.cargo_process("build"), + assert_that(foo.cargo("build"), execs().with_status(0)); } #[test] fn build_check() { - if !is_nightly() { - return; - } - let foo = project("foo") .file("Cargo.toml", r#" [package] @@ -203,6 +191,8 @@ ::bar::baz(); } "#); + foo.build(); + let bar = project("bar") .file("Cargo.toml", r#" [package] @@ -215,9 +205,9 @@ "#); bar.build(); - assert_that(foo.cargo_process("build"), + assert_that(foo.cargo("build"), execs().with_status(0)); - assert_that(foo.cargo_process("check"), + assert_that(foo.cargo("check"), execs().with_status(0)); } @@ -225,10 +215,6 @@ // not built. #[test] fn issue_3418() { - if !is_nightly() { - return; - } - let foo = project("foo") .file("Cargo.toml", r#" [package] @@ -240,7 +226,6 @@ "#) .file("src/lib.rs", "") .file("src/main.rs", "fn main() {}"); - foo.build(); assert_that(foo.cargo_process("check").arg("-v"), execs().with_status(0) @@ -251,10 +236,6 @@ // checked, but in this case with a proc macro too. 
#[test] fn issue_3419() { - if !is_nightly() { - return; - } - let foo = project("foo") .file("Cargo.toml", r#" [package] @@ -304,9 +285,6 @@ // test `cargo rustc --profile check` #[test] fn rustc_check() { - if !is_nightly() { - return - } let foo = project("foo") .file("Cargo.toml", r#" [package] @@ -345,9 +323,6 @@ #[test] fn rustc_check_err() { - if !is_nightly() { - return - } let foo = project("foo") .file("Cargo.toml", r#" [package] @@ -383,3 +358,38 @@ .arg("--emit=metadata"), execs().with_status(101)); } + +#[test] +fn check_all() { + let foo = project("foo") + .file("Cargo.toml", r#" + [package] + name = "foo" + version = "0.0.1" + authors = [] + + [workspace] + [dependencies] + b = { path = "b" } + "#) + .file("src/main.rs", "fn main() {}") + .file("examples/a.rs", "fn main() {}") + .file("tests/a.rs", "") + .file("src/lib.rs", "") + .file("b/Cargo.toml", r#" + [package] + name = "b" + version = "0.0.1" + authors = [] + "#) + .file("b/src/main.rs", "fn main() {}") + .file("b/src/lib.rs", ""); + + assert_that(foo.cargo_process("check").arg("--all").arg("-v"), + execs().with_status(0) + .with_stderr_contains("[..] --crate-name foo src[/]lib.rs [..]") + .with_stderr_contains("[..] --crate-name foo src[/]main.rs [..]") + .with_stderr_contains("[..] --crate-name b b[/]src[/]lib.rs [..]") + .with_stderr_contains("[..] 
--crate-name b b[/]src[/]main.rs [..]") + ); +} diff -Nru cargo-0.17.0/tests/clean.rs cargo-0.19.0/tests/clean.rs --- cargo-0.17.0/tests/clean.rs 2017-03-03 19:21:26.000000000 +0000 +++ cargo-0.19.0/tests/clean.rs 2017-05-16 03:23:10.000000000 +0000 @@ -74,7 +74,6 @@ name = "d2" "#) .file("d2/src/main.rs", "fn main() { println!(\"d2\"); }"); - p.build(); assert_that(p.cargo_process("build").arg("-p").arg("d1").arg("-p").arg("d2") .arg("-p").arg("foo"), @@ -120,7 +119,7 @@ .file("a/src/lib.rs", ""); p.build(); - assert_that(p.cargo_process("build").arg("--release"), + assert_that(p.cargo("build").arg("--release"), execs().with_status(0)); assert_that(p.cargo("clean").arg("-p").arg("foo"), @@ -164,7 +163,7 @@ .file("a/src/lib.rs", ""); p.build(); - assert_that(p.cargo_process("build").env("FIRST", "1"), + assert_that(p.cargo("build").env("FIRST", "1"), execs().with_status(0)); assert_that(p.cargo("clean").arg("-p").arg("foo"), execs().with_status(0)); @@ -203,7 +202,7 @@ .file("src/main.rs", "fn main() {}"); p.build(); - assert_that(p.cargo_process("build"), + assert_that(p.cargo("build"), execs().with_status(0)); assert_that(p.cargo("clean").arg("-p").arg("dep"), execs().with_status(0).with_stdout("")); @@ -228,7 +227,7 @@ Package::new("bar", "0.1.0").publish(); - assert_that(p.cargo_process("build"), + assert_that(p.cargo("build"), execs().with_status(0)); assert_that(p.cargo("clean").arg("-p").arg("bar"), execs().with_status(0).with_stdout("")); diff -Nru cargo-0.17.0/tests/cross-compile.rs cargo-0.19.0/tests/cross-compile.rs --- cargo-0.17.0/tests/cross-compile.rs 2017-03-03 19:21:26.000000000 +0000 +++ cargo-0.19.0/tests/cross-compile.rs 2017-05-16 03:23:10.000000000 +0000 @@ -548,17 +548,17 @@ .with_stderr(&format!("\ [COMPILING] foo v0.0.0 ({foo}) [FINISHED] dev [unoptimized + debuginfo] target(s) in [..] 
-[RUNNING] target[/]{triple}[/]debug[/]deps[/]bar-[..][EXE] -[RUNNING] target[/]{triple}[/]debug[/]deps[/]foo-[..][EXE]", foo = p.url(), triple = target)) +[RUNNING] target[/]{triple}[/]debug[/]deps[/]foo-[..][EXE] +[RUNNING] target[/]{triple}[/]debug[/]deps[/]bar-[..][EXE]", foo = p.url(), triple = target)) .with_stdout(" running 1 test -test test ... ok +test test_foo ... ok test result: ok. 1 passed; 0 failed; 0 ignored; 0 measured running 1 test -test test_foo ... ok +test test ... ok test result: ok. 1 passed; 0 failed; 0 ignored; 0 measured @@ -582,6 +582,7 @@ //! assert!(true); //! ``` "#); + p.build(); let host_output = format!("\ [COMPILING] foo v0.0.0 ({foo}) @@ -591,13 +592,13 @@ ", foo = p.url()); println!("a"); - assert_that(p.cargo_process("test"), + assert_that(p.cargo("test"), execs().with_status(0) .with_stderr(&host_output)); println!("b"); let target = host(); - assert_that(p.cargo_process("test").arg("--target").arg(&target), + assert_that(p.cargo("test").arg("--target").arg(&target), execs().with_status(0) .with_stderr(&format!("\ [COMPILING] foo v0.0.0 ({foo}) @@ -608,7 +609,7 @@ println!("c"); let target = alternate(); - assert_that(p.cargo_process("test").arg("--target").arg(&target), + assert_that(p.cargo("test").arg("--target").arg(&target), execs().with_status(0) .with_stderr(&format!("\ [COMPILING] foo v0.0.0 ({foo}) @@ -1046,8 +1047,9 @@ fn main() { println!("cargo:val=1") } "#) .file("d2/src/lib.rs", ""); + p.build(); - assert_that(p.cargo_process("build").arg("-v"), execs().with_status(0)); - assert_that(p.cargo_process("build").arg("-v").arg("--target").arg(&target), + assert_that(p.cargo("build").arg("-v"), execs().with_status(0)); + assert_that(p.cargo("build").arg("-v").arg("--target").arg(&target), execs().with_status(0)); } diff -Nru cargo-0.17.0/tests/directory.rs cargo-0.19.0/tests/directory.rs --- cargo-0.17.0/tests/directory.rs 2017-03-03 19:21:26.000000000 +0000 +++ cargo-0.19.0/tests/directory.rs 2017-05-16 
03:23:10.000000000 +0000 @@ -10,6 +10,7 @@ use rustc_serialize::json; +use cargotest::cargo_process; use cargotest::support::{project, execs, ProjectBuilder}; use cargotest::support::paths; use cargotest::support::registry::{Package, cksum}; @@ -20,7 +21,6 @@ t!(fs::create_dir(&root.join(".cargo"))); t!(t!(File::create(root.join(".cargo/config"))).write_all(br#" [source.crates-io] - registry = 'https://wut' replace-with = 'my-awesome-local-registry' [source.my-awesome-local-registry] @@ -105,6 +105,144 @@ ")); } +#[test] +fn simple_install() { + setup(); + + VendorPackage::new("foo") + .file("Cargo.toml", r#" + [package] + name = "foo" + version = "0.1.0" + authors = [] + "#) + .file("src/lib.rs", "pub fn foo() {}") + .build(); + + VendorPackage::new("bar") + .file("Cargo.toml", r#" + [package] + name = "bar" + version = "0.1.0" + authors = [] + + [dependencies] + foo = "0.1.0" + "#) + .file("src/main.rs", r#" + extern crate foo; + + pub fn main() { + foo::foo(); + } + "#) + .build(); + + assert_that(cargo_process().arg("install").arg("bar"), + execs().with_status(0).with_stderr( +" Installing bar v0.1.0 + Compiling foo v0.1.0 + Compiling bar v0.1.0 + Finished release [optimized] target(s) in [..] secs + Installing [..]bar[..] 
+warning: be sure to add `[..]` to your PATH to be able to run the installed binaries +")); +} + +#[test] +fn simple_install_fail() { + setup(); + + VendorPackage::new("foo") + .file("Cargo.toml", r#" + [package] + name = "foo" + version = "0.1.0" + authors = [] + "#) + .file("src/lib.rs", "pub fn foo() {}") + .build(); + + VendorPackage::new("bar") + .file("Cargo.toml", r#" + [package] + name = "bar" + version = "0.1.0" + authors = [] + + [dependencies] + foo = "0.1.0" + baz = "9.8.7" + "#) + .file("src/main.rs", r#" + extern crate foo; + + pub fn main() { + foo::foo(); + } + "#) + .build(); + + assert_that(cargo_process().arg("install").arg("bar"), + execs().with_status(101).with_stderr( +" Installing bar v0.1.0 +error: failed to compile `bar v0.1.0`, intermediate artifacts can be found at `[..]` + +Caused by: + no matching package named `baz` found (required by `bar`) +location searched: registry https://github.com/rust-lang/crates.io-index +version required: ^9.8.7 +")); +} + +#[test] +fn install_without_feature_dep() { + setup(); + + VendorPackage::new("foo") + .file("Cargo.toml", r#" + [package] + name = "foo" + version = "0.1.0" + authors = [] + "#) + .file("src/lib.rs", "pub fn foo() {}") + .build(); + + VendorPackage::new("bar") + .file("Cargo.toml", r#" + [package] + name = "bar" + version = "0.1.0" + authors = [] + + [dependencies] + foo = "0.1.0" + baz = { version = "9.8.7", optional = true } + + [features] + wantbaz = ["baz"] + "#) + .file("src/main.rs", r#" + extern crate foo; + + pub fn main() { + foo::foo(); + } + "#) + .build(); + + assert_that(cargo_process().arg("install").arg("bar"), + execs().with_status(0).with_stderr( +" Installing bar v0.1.0 + Compiling foo v0.1.0 + Compiling bar v0.1.0 + Finished release [optimized] target(s) in [..] secs + Installing [..]bar[..] 
+warning: be sure to add `[..]` to your PATH to be able to run the installed binaries +")); +} + #[test] fn not_there() { setup(); diff -Nru cargo-0.17.0/tests/doc.rs cargo-0.19.0/tests/doc.rs --- cargo-0.17.0/tests/doc.rs 2017-03-03 19:21:26.000000000 +0000 +++ cargo-0.19.0/tests/doc.rs 2017-05-16 03:23:10.000000000 +0000 @@ -4,7 +4,7 @@ use std::str; use std::fs; -use cargotest::{is_nightly, rustc_host}; +use cargotest::rustc_host; use cargotest::support::{project, execs, path2url}; use cargotest::support::registry::Package; use hamcrest::{assert_that, existing_file, existing_dir, is_not}; @@ -284,8 +284,6 @@ fn doc_target() { const TARGET: &'static str = "arm-unknown-linux-gnueabihf"; - if !is_nightly() { return } - let p = project("foo") .file("Cargo.toml", r#" [package] @@ -561,13 +559,15 @@ /// dox pub fn foo() {} "#); - assert_that(p.cargo_process("doc"), + p.build(); + + assert_that(p.cargo("doc"), execs().with_status(0)); assert_that(&p.root().join("target/doc/foo/index.html"), existing_file()); fs::remove_dir_all(p.root().join("target/doc/foo")).unwrap(); - assert_that(p.cargo_process("doc"), + assert_that(p.cargo("doc"), execs().with_status(0)); assert_that(&p.root().join("target/doc/foo/index.html"), existing_file()); } @@ -643,7 +643,6 @@ .file("bar/src/lib.rs", r#" pub fn bar() {} "#); - p.build(); // The order in which bar is compiled or documented is not deterministic assert_that(p.cargo_process("doc") @@ -677,7 +676,6 @@ .file("bar/src/lib.rs", r#" pub fn bar() {} "#); - p.build(); // The order in which foo and bar are documented is not guaranteed assert_that(p.cargo_process("doc") @@ -705,7 +703,6 @@ .file("a/src/lib.rs", r#" pub fn a() {} "#); - p.build(); Package::new("a", "0.1.0").publish(); diff -Nru cargo-0.17.0/tests/features.rs cargo-0.19.0/tests/features.rs --- cargo-0.17.0/tests/features.rs 2017-03-03 19:21:26.000000000 +0000 +++ cargo-0.19.0/tests/features.rs 2017-05-16 03:23:10.000000000 +0000 @@ -106,20 +106,21 @@ authors = [] "#) 
.file("bar/src/lib.rs", ""); + p.build(); - assert_that(p.cargo_process("build"), + assert_that(p.cargo("build"), execs().with_status(101).with_stderr("\ [ERROR] Package `bar v0.0.1 ([..])` does not have these features: `bar` ")); - let p = p.file("Cargo.toml", r#" - [project] - name = "foo" - version = "0.0.1" - authors = [] - "#); + p.change_file("Cargo.toml", r#" + [project] + name = "foo" + version = "0.0.1" + authors = [] + "#); - assert_that(p.cargo_process("build").arg("--features").arg("test"), + assert_that(p.cargo("build").arg("--features").arg("test"), execs().with_status(101).with_stderr("\ [ERROR] Package `foo v0.0.1 ([..])` does not have these features: `test` ")); @@ -938,25 +939,26 @@ #[cfg(feature = "some-feat")] pub fn test() { print!("test"); } "#); + p.build(); // The foo project requires that feature "some-feat" in "bar" is enabled. // Building without any features enabled should fail: - assert_that(p.cargo_process("build"), + assert_that(p.cargo("build"), execs().with_status(101)); // We should be able to enable the feature "derived-feat", which enables "some-feat", // on the command line. 
The feature is enabled, thus building should be successful: - assert_that(p.cargo_process("build").arg("--features").arg("derived/derived-feat"), + assert_that(p.cargo("build").arg("--features").arg("derived/derived-feat"), execs().with_status(0)); // Trying to enable features of transitive dependencies is an error - assert_that(p.cargo_process("build").arg("--features").arg("bar/some-feat"), + assert_that(p.cargo("build").arg("--features").arg("bar/some-feat"), execs().with_status(101).with_stderr("\ [ERROR] Package `foo v0.0.1 ([..])` does not have these features: `bar` ")); // Hierarchical feature specification should still be disallowed - assert_that(p.cargo_process("build").arg("--features").arg("derived/bar/some-feat"), + assert_that(p.cargo("build").arg("--features").arg("derived/bar/some-feat"), execs().with_status(101).with_stderr("\ [ERROR] feature names may not contain slashes: `bar/some-feat` ")); diff -Nru cargo-0.17.0/tests/freshness.rs cargo-0.19.0/tests/freshness.rs --- cargo-0.17.0/tests/freshness.rs 2017-03-03 19:21:26.000000000 +0000 +++ cargo-0.19.0/tests/freshness.rs 2017-05-16 03:23:10.000000000 +0000 @@ -680,3 +680,44 @@ [FINISHED] [..] ")); } + +#[test] +fn rebuild_if_environment_changes() { + let p = project("env_change") + .file("Cargo.toml", r#" + [package] + name = "env_change" + description = "old desc" + version = "0.0.1" + authors = [] + "#) + .file("src/main.rs", r#" + fn main() { + println!("{}", env!("CARGO_PKG_DESCRIPTION")); + } + "#); + + assert_that(p.cargo_process("run"), + execs().with_status(0) + .with_stdout("old desc").with_stderr(&format!("\ +[COMPILING] env_change v0.0.1 ({dir}) +[FINISHED] dev [unoptimized + debuginfo] target(s) in [..] 
+[RUNNING] `target[/]debug[/]env_change[EXE]` +", dir = p.url()))); + + File::create(&p.root().join("Cargo.toml")).unwrap().write_all(br#" + [package] + name = "env_change" + description = "new desc" + version = "0.0.1" + authors = [] + "#).unwrap(); + + assert_that(p.cargo("run"), + execs().with_status(0) + .with_stdout("new desc").with_stderr(&format!("\ +[COMPILING] env_change v0.0.1 ({dir}) +[FINISHED] dev [unoptimized + debuginfo] target(s) in [..] +[RUNNING] `target[/]debug[/]env_change[EXE]` +", dir = p.url()))); +} diff -Nru cargo-0.17.0/tests/git.rs cargo-0.19.0/tests/git.rs --- cargo-0.17.0/tests/git.rs 2017-03-03 19:21:26.000000000 +0000 +++ cargo-0.19.0/tests/git.rs 2017-05-16 03:23:10.000000000 +0000 @@ -8,7 +8,7 @@ use std::path::Path; use cargo::util::process; -use cargotest::{sleep_ms, RUSTC}; +use cargotest::sleep_ms; use cargotest::support::paths::{self, CargoPathExt}; use cargotest::support::{git, project, execs, main_file, path2url}; use hamcrest::{assert_that,existing_file}; @@ -752,6 +752,73 @@ } #[test] +fn dep_with_bad_submodule() { + let project = project("foo"); + let git_project = git::new("dep1", |project| { + project + .file("Cargo.toml", r#" + [package] + name = "dep1" + version = "0.5.0" + authors = ["carlhuda@example.com"] + "#) + }).unwrap(); + let git_project2 = git::new("dep2", |project| { + project.file("lib.rs", "pub fn dep() {}") + }).unwrap(); + + let repo = git2::Repository::open(&git_project.root()).unwrap(); + let url = path2url(git_project2.root()).to_string(); + git::add_submodule(&repo, &url, Path::new("src")); + git::commit(&repo); + + // now amend the first commit on git_project2 to make submodule ref point to not-found + // commit + let repo = git2::Repository::open(&git_project2.root()).unwrap(); + let original_submodule_ref = repo.refname_to_id("refs/heads/master").unwrap(); + let commit = repo.find_commit(original_submodule_ref).unwrap(); + commit.amend( + Some("refs/heads/master"), + None, + None, + None, + 
Some("something something"), + None).unwrap(); + + let project = project + .file("Cargo.toml", &format!(r#" + [project] + + name = "foo" + version = "0.5.0" + authors = ["wycats@example.com"] + + [dependencies.dep1] + + git = '{}' + "#, git_project.url())) + .file("src/lib.rs", " + extern crate dep1; + pub fn foo() { dep1::dep() } + "); + + let expected = format!("\ +[UPDATING] git repository [..] +[ERROR] failed to load source for a dependency on `dep1` + +Caused by: + Unable to update {} + +Caused by: + failed to update submodule `src` + +To learn more, run the command again with --verbose.\n", path2url(git_project.root())); + + assert_that(project.cargo_process("build"), + execs().with_stderr(expected).with_status(101)); +} + +#[test] fn two_deps_only_update_one() { let project = project("foo"); let git1 = git::new("dep1", |project| { @@ -912,7 +979,7 @@ let repo = git2::Repository::open(&git_project.root()).unwrap(); let mut sub = git::add_submodule(&repo, &git_project2.url().to_string(), - &Path::new("src")); + Path::new("src")); git::commit(&repo); let project = project @@ -1651,7 +1718,7 @@ let repo = git2::Repository::open(&git1.root()).unwrap(); let url = path2url(git2.root()).to_string(); - git::add_submodule(&repo, &url, &Path::new("a/submodule")); + git::add_submodule(&repo, &url, Path::new("a/submodule")); git::commit(&repo); git2::Repository::init(&project.root()).unwrap(); @@ -1743,9 +1810,6 @@ #[test] fn denied_lints_are_allowed() { - let enabled = RUSTC.with(|r| r.cap_lints); - if !enabled { return } - let a = git::new("a", |p| { p.file("Cargo.toml", r#" [project] @@ -1826,3 +1890,48 @@ assert_that(p.cargo("build"), execs().with_status(0)); } + +#[test] +fn two_at_rev_instead_of_tag() { + let git = git::new("git", |p| { + p.file("Cargo.toml", r#" + [project] + name = "git1" + version = "0.5.0" + authors = [] + "#) + .file("src/lib.rs", "") + .file("a/Cargo.toml", r#" + [project] + name = "git2" + version = "0.5.0" + authors = [] + "#) + 
.file("a/src/lib.rs", "") + }).unwrap(); + + // Make a tag corresponding to the current HEAD + let repo = git2::Repository::open(&git.root()).unwrap(); + let head = repo.head().unwrap().target().unwrap(); + repo.tag("v0.1.0", + &repo.find_object(head, None).unwrap(), + &repo.signature().unwrap(), + "make a new tag", + false).unwrap(); + + let p = project("foo") + .file("Cargo.toml", &format!(r#" + [package] + name = "foo" + version = "0.0.1" + authors = [] + + [dependencies] + git1 = {{ git = '{0}', rev = 'v0.1.0' }} + git2 = {{ git = '{0}', rev = 'v0.1.0' }} + "#, git.url())) + .file("src/lib.rs", ""); + + assert_that(p.cargo_process("generate-lockfile"), execs().with_status(0)); + assert_that(p.cargo("build").arg("-v"), execs().with_status(0)); +} diff -Nru cargo-0.17.0/tests/init.rs cargo-0.19.0/tests/init.rs --- cargo-0.17.0/tests/init.rs 2017-03-03 19:21:26.000000000 +0000 +++ cargo-0.19.0/tests/init.rs 2017-05-16 03:23:10.000000000 +0000 @@ -8,14 +8,14 @@ use std::env; use cargo::util::ProcessBuilder; -use cargotest::support::{execs, paths, cargo_dir}; +use cargotest::support::{execs, paths, cargo_exe}; use hamcrest::{assert_that, existing_file, existing_dir, is_not}; use tempdir::TempDir; fn cargo_process(s: &str) -> ProcessBuilder { - let mut p = cargotest::process(&cargo_dir().join("cargo")); + let mut p = cargotest::process(&cargo_exe()); p.arg(s).cwd(&paths::root()).env("HOME", &paths::home()); - return p; + p } #[test] @@ -57,7 +57,7 @@ #[test] fn both_lib_and_bin() { let td = TempDir::new("cargo").unwrap(); - assert_that(cargo_process("init").arg("--lib").arg("--bin").cwd(td.path().clone()) + assert_that(cargo_process("init").arg("--lib").arg("--bin").cwd(td.path()) .env("USER", "foo"), execs().with_status(101).with_stderr( "[ERROR] can't specify both lib and binary outputs")); @@ -427,7 +427,7 @@ fn no_filename() { assert_that(cargo_process("init").arg("/"), execs().with_status(101) - .with_stderr(&format!("\ + .with_stderr("\ [ERROR] cannot 
auto-detect project name from path \"/\" ; use --name to override -"))); +".to_string())); } diff -Nru cargo-0.17.0/tests/install.rs cargo-0.19.0/tests/install.rs --- cargo-0.17.0/tests/install.rs 2017-03-03 19:21:26.000000000 +0000 +++ cargo-0.19.0/tests/install.rs 2017-05-16 03:23:10.000000000 +0000 @@ -16,7 +16,7 @@ fn cargo_process(s: &str) -> ProcessBuilder { let mut p = cargotest::cargo_process(); p.arg(s); - return p + p } fn pkg(name: &str, vers: &str) { diff -Nru cargo-0.17.0/tests/metadata.rs cargo-0.19.0/tests/metadata.rs --- cargo-0.17.0/tests/metadata.rs 2017-03-03 19:21:26.000000000 +0000 +++ cargo-0.19.0/tests/metadata.rs 2017-05-16 03:23:10.000000000 +0000 @@ -8,6 +8,7 @@ #[test] fn cargo_metadata_simple() { let p = project("foo") + .file("src/foo.rs", "") .file("Cargo.toml", &basic_bin_manifest("foo")); assert_that(p.cargo_process("metadata"), execs().with_json(r#" @@ -21,11 +22,15 @@ "dependencies": [], "license": null, "license_file": null, + "description": null, "targets": [ { "kind": [ "bin" ], + "crate_types": [ + "bin" + ], "name": "foo", "src_path": "[..][/]foo[/]src[/]foo.rs" } @@ -48,10 +53,82 @@ }"#)); } +#[test] +fn cargo_metadata_warns_on_implicit_version() { + let p = project("foo") + .file("src/foo.rs", "") + .file("Cargo.toml", &basic_bin_manifest("foo")); + p.build(); + + assert_that(p.cargo("metadata"), + execs().with_stderr("\ +[WARNING] please specify `--format-version` flag explicitly to avoid compatibility problems")); + + assert_that(p.cargo("metadata").arg("--format-version").arg("1"), + execs().with_stderr("")); +} + +#[test] +fn library_with_several_crate_types() { + let p = project("foo") + .file("src/lib.rs", "") + .file("Cargo.toml", r#" +[package] +name = "foo" +version = "0.5.0" + +[lib] +crate-type = ["lib", "staticlib"] + "#); + + assert_that(p.cargo_process("metadata"), execs().with_json(r#" + { + "packages": [ + { + "name": "foo", + "version": "0.5.0", + "id": "foo[..]", + "source": null, + "dependencies": [], + 
"license": null, + "license_file": null, + "description": null, + "targets": [ + { + "kind": [ + "lib", + "staticlib" + ], + "crate_types": [ + "lib", + "staticlib" + ], + "name": "foo", + "src_path": "[..][/]foo[/]src[/]lib.rs" + } + ], + "features": {}, + "manifest_path": "[..]Cargo.toml" + } + ], + "workspace_members": ["foo 0.5.0 (path+file:[..]foo)"], + "resolve": { + "nodes": [ + { + "dependencies": [], + "id": "foo 0.5.0 (path+file:[..]foo)" + } + ], + "root": "foo 0.5.0 (path+file:[..]foo)" + }, + "version": 1 + }"#)); +} #[test] fn cargo_metadata_with_deps_and_version() { let p = project("foo") + .file("src/foo.rs", "") .file("Cargo.toml", r#" [project] name = "foo" @@ -84,11 +161,15 @@ "source": "registry+[..]", "license": null, "license_file": null, + "description": null, "targets": [ { "kind": [ "lib" ], + "crate_types": [ + "lib" + ], "name": "baz", "src_path": "[..]lib.rs" } @@ -115,11 +196,15 @@ "source": "registry+[..]", "license": null, "license_file": null, + "description": null, "targets": [ { "kind": [ "lib" ], + "crate_types": [ + "lib" + ], "name": "bar", "src_path": "[..]lib.rs" } @@ -146,11 +231,15 @@ "source": null, "license": "MIT", "license_file": null, + "description": "foo", "targets": [ { "kind": [ "bin" ], + "crate_types": [ + "bin" + ], "name": "foo", "src_path": "[..]foo.rs" } @@ -185,6 +274,127 @@ } #[test] +fn example() { + let p = project("foo") + .file("src/lib.rs", "") + .file("examples/ex.rs", "") + .file("Cargo.toml", r#" +[package] +name = "foo" +version = "0.1.0" + +[[example]] +name = "ex" + "#); + + assert_that(p.cargo_process("metadata"), execs().with_json(r#" + { + "packages": [ + { + "name": "foo", + "version": "0.1.0", + "id": "foo[..]", + "license": null, + "license_file": null, + "description": null, + "source": null, + "dependencies": [], + "targets": [ + { + "kind": [ "lib" ], + "crate_types": [ "lib" ], + "name": "foo", + "src_path": "[..][/]foo[/]src[/]lib.rs" + }, + { + "kind": [ "example" ], + "crate_types": [ 
"bin" ], + "name": "ex", + "src_path": "[..][/]foo[/]examples[/]ex.rs" + } + ], + "features": {}, + "manifest_path": "[..]Cargo.toml" + } + ], + "workspace_members": [ + "foo 0.1.0 (path+file:[..]foo)" + ], + "resolve": { + "root": "foo 0.1.0 (path+file://[..]foo)", + "nodes": [ + { + "id": "foo 0.1.0 (path+file:[..]foo)", + "dependencies": [] + } + ] + }, + "version": 1 + }"#)); +} + +#[test] +fn example_lib() { + let p = project("foo") + .file("src/lib.rs", "") + .file("examples/ex.rs", "") + .file("Cargo.toml", r#" +[package] +name = "foo" +version = "0.1.0" + +[[example]] +name = "ex" +crate-type = ["rlib", "dylib"] + "#); + + assert_that(p.cargo_process("metadata"), execs().with_json(r#" + { + "packages": [ + { + "name": "foo", + "version": "0.1.0", + "id": "foo[..]", + "license": null, + "license_file": null, + "description": null, + "source": null, + "dependencies": [], + "targets": [ + { + "kind": [ "lib" ], + "crate_types": [ "lib" ], + "name": "foo", + "src_path": "[..][/]foo[/]src[/]lib.rs" + }, + { + "kind": [ "example" ], + "crate_types": [ "rlib", "dylib" ], + "name": "ex", + "src_path": "[..][/]foo[/]examples[/]ex.rs" + } + ], + "features": {}, + "manifest_path": "[..]Cargo.toml" + } + ], + "workspace_members": [ + "foo 0.1.0 (path+file:[..]foo)" + ], + "resolve": { + "root": "foo 0.1.0 (path+file://[..]foo)", + "nodes": [ + { + "id": "foo 0.1.0 (path+file:[..]foo)", + "dependencies": [] + } + ] + }, + "version": 1 + }"#)); +} + +#[test] fn workspace_metadata() { let p = project("foo") .file("Cargo.toml", r#" @@ -195,7 +405,6 @@ .file("bar/src/lib.rs", "") .file("baz/Cargo.toml", &basic_lib_manifest("baz")) .file("baz/src/lib.rs", ""); - p.build(); assert_that(p.cargo_process("metadata"), execs().with_status(0).with_json(r#" { @@ -208,9 +417,11 @@ "dependencies": [], "license": null, "license_file": null, + "description": null, "targets": [ { "kind": [ "lib" ], + "crate_types": [ "lib" ], "name": "bar", "src_path": "[..]bar[/]src[/]lib.rs" } @@ 
-226,9 +437,11 @@ "dependencies": [], "license": null, "license_file": null, + "description": null, "targets": [ { "kind": [ "lib" ], + "crate_types": [ "lib" ], "name": "baz", "src_path": "[..]baz[/]src[/]lib.rs" } @@ -266,7 +479,6 @@ .file("bar/src/lib.rs", "") .file("baz/Cargo.toml", &basic_lib_manifest("baz")) .file("baz/src/lib.rs", ""); - p.build(); assert_that(p.cargo_process("metadata").arg("--no-deps"), execs().with_status(0).with_json(r#" { @@ -279,9 +491,11 @@ "dependencies": [], "license": null, "license_file": null, + "description": null, "targets": [ { "kind": [ "lib" ], + "crate_types": [ "lib" ], "name": "bar", "src_path": "[..]bar[/]src[/]lib.rs" } @@ -297,9 +511,11 @@ "dependencies": [], "license": null, "license_file": null, + "description": null, "targets": [ { "kind": [ "lib" ], + "crate_types": ["lib"], "name": "baz", "src_path": "[..]baz[/]src[/]lib.rs" } @@ -319,8 +535,8 @@ let p = project("foo") .file("Cargo.toml", ""); - assert_that(p.cargo_process("metadata"), execs().with_status(101) - .with_stderr("\ + assert_that(p.cargo_process("metadata").arg("--format-version").arg("1"), + execs().with_status(101).with_stderr("\ [ERROR] failed to parse manifest at `[..]` Caused by: @@ -338,8 +554,10 @@ "dependencies":[], "license": null, "license_file": null, + "description": null, "targets":[{ "kind":["bin"], + "crate_types":["bin"], "name":"foo", "src_path":"[..][/]foo[/]src[/]foo.rs" }], @@ -418,7 +636,7 @@ } #[test] -fn carg_metadata_bad_version() { +fn cargo_metadata_bad_version() { let p = project("foo") .file("Cargo.toml", &basic_bin_manifest("foo")) .file("src/foo.rs", &main_file(r#""i am foo""#, &[])); diff -Nru cargo-0.17.0/tests/new.rs cargo-0.19.0/tests/new.rs --- cargo-0.17.0/tests/new.rs 2017-03-03 19:21:26.000000000 +0000 +++ cargo-0.19.0/tests/new.rs 2017-05-16 03:23:10.000000000 +0000 @@ -1,5 +1,6 @@ extern crate cargo; extern crate cargotest; +extern crate chrono; extern crate hamcrest; extern crate tempdir; @@ -16,7 +17,7 @@ fn 
cargo_process(s: &str) -> ProcessBuilder { let mut p = cargotest::cargo_process(); p.arg(s); - return p; + p } #[test] @@ -32,6 +33,17 @@ assert_that(&paths::root().join("foo/src/lib.rs"), existing_file()); assert_that(&paths::root().join("foo/.gitignore"), is_not(existing_file())); + let lib = paths::root().join("foo/src/lib.rs"); + let mut contents = String::new(); + File::open(&lib).unwrap().read_to_string(&mut contents).unwrap(); + assert_eq!(contents, r#"#[cfg(test)] +mod tests { + #[test] + fn it_works() { + } +} +"#); + assert_that(cargo_process("build").cwd(&paths::root().join("foo")), execs().with_status(0)); } @@ -58,7 +70,7 @@ #[test] fn both_lib_and_bin() { let td = TempDir::new("cargo").unwrap(); - assert_that(cargo_process("new").arg("--lib").arg("--bin").arg("foo").cwd(td.path().clone()) + assert_that(cargo_process("new").arg("--lib").arg("--bin").arg("foo").cwd(td.path()) .env("USER", "foo"), execs().with_status(101).with_stderr( "[ERROR] can't specify both lib and binary outputs")); @@ -67,7 +79,7 @@ #[test] fn simple_git() { let td = TempDir::new("cargo").unwrap(); - assert_that(cargo_process("new").arg("--lib").arg("foo").cwd(td.path().clone()) + assert_that(cargo_process("new").arg("--lib").arg("foo").cwd(td.path()) .env("USER", "foo"), execs().with_status(0)); @@ -77,7 +89,7 @@ assert_that(&td.path().join("foo/.git"), existing_dir()); assert_that(&td.path().join("foo/.gitignore"), existing_file()); - assert_that(cargo_process("build").cwd(&td.path().clone().join("foo")), + assert_that(cargo_process("build").cwd(&td.path().join("foo")), execs().with_status(0)); } @@ -168,7 +180,7 @@ // the hierarchy let td = TempDir::new("cargo").unwrap(); assert_that(cargo_process("new").arg("foo").env("USER", "foo") - .cwd(td.path().clone()), + .cwd(td.path()), execs().with_status(0)); let toml = td.path().join("foo/Cargo.toml"); @@ -183,7 +195,7 @@ // the hierarchy let td = TempDir::new("cargo").unwrap(); 
assert_that(cargo_process("new").arg("foo").env("USER", "foo \"bar\"") - .cwd(td.path().clone()), + .cwd(td.path()), execs().with_status(0)); let toml = td.path().join("foo/Cargo.toml"); @@ -200,7 +212,7 @@ assert_that(cargo_process("new").arg("foo") .env_remove("USER") .env("USERNAME", "foo") - .cwd(td.path().clone()), + .cwd(td.path()), execs().with_status(0)); let toml = td.path().join("foo/Cargo.toml"); @@ -219,7 +231,7 @@ .env("EMAIL", "baz2") .env("CARGO_NAME", "bar") .env("CARGO_EMAIL", "baz") - .cwd(td.path().clone()), + .cwd(td.path()), execs().with_status(0)); let toml = td.path().join("foo/Cargo.toml"); @@ -236,7 +248,7 @@ assert_that(cargo_process("new").arg("foo") .env("USER", "bar") .env("EMAIL", "baz") - .cwd(td.path().clone()), + .cwd(td.path()), execs().with_status(0)); let toml = td.path().join("foo/Cargo.toml"); @@ -266,7 +278,7 @@ assert_that(cargo_process("new").arg("foo") .env("GIT_AUTHOR_NAME", "foo") .env("GIT_AUTHOR_EMAIL", "gitfoo") - .cwd(td.path().clone()), + .cwd(td.path()), execs().with_status(0)); let toml = td.path().join("foo/Cargo.toml"); @@ -284,7 +296,7 @@ assert_that(cargo_process("new").arg("foo") .env_remove("USER") .env("GIT_COMMITTER_NAME", "gitfoo") - .cwd(td.path().clone()), + .cwd(td.path()), execs().with_status(0)); let toml = td.path().join("foo/Cargo.toml"); diff -Nru cargo-0.17.0/tests/overrides.rs cargo-0.19.0/tests/overrides.rs --- cargo-0.17.0/tests/overrides.rs 2017-03-03 19:21:26.000000000 +0000 +++ cargo-0.19.0/tests/overrides.rs 2017-05-16 03:23:10.000000000 +0000 @@ -928,6 +928,59 @@ } #[test] +fn no_warnings_when_replace_is_used_in_another_workspace_member() { + Package::new("foo", "0.1.0").publish(); + Package::new("bar", "0.1.0").publish(); + + let p = project("ws") + .file("Cargo.toml", r#" + [workspace] + members = [ "first_crate", "second_crate"] + + [replace] + "foo:0.1.0" = { path = "local_foo" }"#) + .file("first_crate/Cargo.toml", r#" + [package] + name = "first_crate" + version = "0.1.0" + + 
[dependencies] + foo = "0.1.0" + "#) + .file("first_crate/src/lib.rs", "") + .file("second_crate/Cargo.toml", r#" + [package] + name = "second_crate" + version = "0.1.0" + "#) + .file("second_crate/src/lib.rs", "") + .file("local_foo/Cargo.toml", r#" + [package] + name = "foo" + version = "0.1.0" + "#) + .file("local_foo/src/lib.rs", ""); + p.build(); + + assert_that(p.cargo("build").cwd(p.root().join("first_crate")), + execs().with_status(0) + .with_stdout("") + .with_stderr("\ +[UPDATING] registry `[..]` +[COMPILING] foo v0.1.0 ([..]) +[COMPILING] first_crate v0.1.0 ([..]) +[FINISHED] [..]")); + + assert_that(p.cargo("build").cwd(p.root().join("second_crate")), + execs().with_status(0) + .with_stdout("") + .with_stderr("\ +[COMPILING] second_crate v0.1.0 ([..]) +[FINISHED] [..]")); +} + + +#[test] fn override_to_path_dep() { Package::new("foo", "0.1.0").dep("bar", "0.1").publish(); Package::new("bar", "0.1.0").publish(); @@ -1104,3 +1157,96 @@ dependencies; the dependency on `bar` was either added or\ ")); } + +#[test] +fn override_with_default_feature() { + Package::new("another", "0.1.0").publish(); + Package::new("another", "0.1.1") + .dep("bar", "0.1") + .publish(); + Package::new("bar", "0.1.0").publish(); + + let p = project("local") + .file("Cargo.toml", r#" + [package] + name = "local" + version = "0.0.1" + authors = [] + + [dependencies] + bar = { path = "bar", default-features = false } + another = "0.1" + another2 = { path = "another2" } + + [replace] + 'bar:0.1.0' = { path = "bar" } + "#) + .file("src/main.rs", r#" + extern crate bar; + + fn main() { + bar::bar(); + } + "#) + .file("bar/Cargo.toml", r#" + [package] + name = "bar" + version = "0.1.0" + authors = [] + + [features] + default = [] + "#) + .file("bar/src/lib.rs", r#" + #[cfg(feature = "default")] + pub fn bar() {} + "#) + .file("another2/Cargo.toml", r#" + [package] + name = "another2" + version = "0.1.0" + authors = [] + + [dependencies] + bar = { version = "0.1", default-features = false 
} + "#) + .file("another2/src/lib.rs", ""); + + assert_that(p.cargo_process("run"), + execs().with_status(0)); +} + +#[test] +fn override_plus_dep() { + Package::new("bar", "0.1.0").publish(); + + let p = project("foo") + .file("Cargo.toml", r#" + [package] + name = "foo" + version = "0.0.1" + authors = [] + + [dependencies] + bar = "0.1" + + [replace] + 'bar:0.1.0' = { path = "bar" } + "#) + .file("src/lib.rs", "") + .file("bar/Cargo.toml", r#" + [package] + name = "bar" + version = "0.1.0" + authors = [] + + [dependencies] + foo = { path = ".." } + "#) + .file("bar/src/lib.rs", ""); + + assert_that(p.cargo_process("build"), + execs().with_status(101).with_stderr_contains("\ +error: cyclic package dependency: [..] +")); +} diff -Nru cargo-0.17.0/tests/package.rs cargo-0.19.0/tests/package.rs --- cargo-0.17.0/tests/package.rs 2017-03-03 19:21:26.000000000 +0000 +++ cargo-0.19.0/tests/package.rs 2017-05-16 03:23:10.000000000 +0000 @@ -6,12 +6,12 @@ extern crate tar; extern crate cargo; -use std::fs::{File, OpenOptions}; +use std::fs::File; use std::io::prelude::*; use std::path::{Path, PathBuf}; use cargotest::{cargo_process, process}; -use cargotest::support::{project, execs, paths, git, path2url, cargo_dir}; +use cargotest::support::{project, execs, paths, git, path2url, cargo_exe}; use flate2::read::GzDecoder; use hamcrest::{assert_that, existing_file, contains}; use tar::Archive; @@ -360,9 +360,9 @@ fn main() {} "#); p.build(); - File::create(p.root().join("src/main.rs")).unwrap().write_all(r#" + File::create(p.root().join("src/main.rs")).unwrap().write_all(br#" fn main() { println!("A change!"); } - "#.as_bytes()).unwrap(); + "#).unwrap(); let mut cargo = cargo_process(); cargo.cwd(p.root()); assert_that(cargo.clone().arg("build"), execs().with_status(0)); @@ -476,12 +476,12 @@ panic!("could not create file {}: {}", p.root().join("src/foo.rs").display(), e) }); - file.write_all(r#" + file.write_all(br#" fn main() { println!("foo"); } - "#.as_bytes()).unwrap(); 
+ "#).unwrap(); std::mem::drop(file); - let mut pro = process(&cargo_dir().join("cargo")); + let mut pro = process(&cargo_exe()); pro.arg("package").cwd(p.root()); // Check that cargo rebuilds the tarball @@ -546,6 +546,9 @@ #[test] fn do_not_package_if_repository_is_dirty() { + let p = project("foo"); + p.build(); + // Create a Git repository containing a minimal Rust project. git::repo(&paths::root().join("foo")) .file("Cargo.toml", r#" @@ -562,15 +565,23 @@ .build(); // Modify Cargo.toml without committing the change. - let p = project("foo"); - let manifest_path = p.root().join("Cargo.toml"); - let mut manifest = t!(OpenOptions::new().append(true).open(manifest_path)); - t!(writeln!(manifest, "")); + p.change_file("Cargo.toml", r#" + [project] + name = "foo" + version = "0.0.1" + license = "MIT" + description = "foo" + documentation = "foo" + homepage = "foo" + repository = "foo" + # change + "#); assert_that(p.cargo("package"), execs().with_status(101) .with_stderr("\ -error: 1 dirty files found in the working directory: +error: 1 files in the working directory contain changes that were not yet \ +committed into git: Cargo.toml diff -Nru cargo-0.17.0/tests/path.rs cargo-0.19.0/tests/path.rs --- cargo-0.17.0/tests/path.rs 2017-03-03 19:21:26.000000000 +0000 +++ cargo-0.19.0/tests/path.rs 2017-05-16 03:23:10.000000000 +0000 @@ -259,8 +259,7 @@ #[test] fn no_rebuild_dependency() { - let mut p = project("foo"); - p = p + let p = project("foo") .file("Cargo.toml", r#" [project] @@ -298,19 +297,18 @@ in [..]\n", p.url(), p.url()))); - // This time we shouldn't compile bar - assert_that(p.cargo("build"), - execs().with_stdout("")); - p.root().move_into_the_past(); - p.build(); // rebuild the files (rewriting them in the process) + sleep_ms(1000); + p.change_file("src/foo.rs", r#" + extern crate bar; + fn main() { bar::bar(); } + "#); + // Don't compile bar, but do recompile foo. 
assert_that(p.cargo("build"), - execs().with_stderr(&format!("[COMPILING] bar v0.5.0 ({}/bar)\n\ - [COMPILING] foo v0.5.0 ({})\n\ - [FINISHED] dev [unoptimized + debuginfo] target(s) \ - in [..]\n", - p.url(), - p.url()))); + execs().with_stderr("\ + [COMPILING] foo v0.5.0 ([..])\n\ + [FINISHED] dev [unoptimized + debuginfo] target(s) \ + in [..]\n")); } #[test] diff -Nru cargo-0.17.0/tests/proc-macro.rs cargo-0.19.0/tests/proc-macro.rs --- cargo-0.17.0/tests/proc-macro.rs 2017-03-03 19:21:26.000000000 +0000 +++ cargo-0.19.0/tests/proc-macro.rs 2017-05-16 03:23:10.000000000 +0000 @@ -214,7 +214,6 @@ assert!(true); } "#); - foo.build(); assert_that(foo.cargo_process("test"), execs().with_status(0) diff -Nru cargo-0.17.0/tests/profiles.rs cargo-0.19.0/tests/profiles.rs --- cargo-0.17.0/tests/profiles.rs 2017-03-03 19:21:26.000000000 +0000 +++ cargo-0.19.0/tests/profiles.rs 2017-05-16 03:23:10.000000000 +0000 @@ -245,7 +245,6 @@ opt-level = 1 "#) .file("bar/src/main.rs", "fn main() {}"); - p.build(); assert_that(p.cargo_process("build").cwd(p.root().join("bar")).arg("-v"), execs().with_status(0).with_stderr("\ @@ -277,7 +276,6 @@ workspace = ".." 
"#) .file("bar/src/main.rs", "fn main() {}"); - p.build(); assert_that(p.cargo_process("build").cwd(p.root().join("bar")).arg("-v"), execs().with_status(0).with_stderr("\ diff -Nru cargo-0.17.0/tests/publish.rs cargo-0.19.0/tests/publish.rs --- cargo-0.17.0/tests/publish.rs 2017-03-03 19:21:26.000000000 +0000 +++ cargo-0.19.0/tests/publish.rs 2017-05-16 03:23:10.000000000 +0000 @@ -26,10 +26,10 @@ fn setup() { let config = paths::root().join(".cargo/config"); t!(fs::create_dir_all(config.parent().unwrap())); - t!(t!(File::create(&config)).write_all(&format!(r#" + t!(t!(File::create(&config)).write_all(br#" [registry] token = "api-token" - "#).as_bytes())); + "#)); t!(fs::create_dir_all(&upload_path().join("api/v1/crates"))); repo(®istry_path()) @@ -184,6 +184,9 @@ #[test] fn dont_publish_dirty() { setup(); + let p = project("foo") + .file("bar", ""); + p.build(); repo(&paths::root().join("foo")) .file("Cargo.toml", r#" @@ -200,13 +203,12 @@ .file("src/main.rs", "fn main() {}") .build(); - let p = project("foo"); - t!(File::create(p.root().join("bar"))); assert_that(p.cargo("publish") .arg("--host").arg(registry().to_string()), execs().with_status(101).with_stderr("\ [UPDATING] registry `[..]` -error: 1 dirty files found in the working directory: +error: 1 files in the working directory contain changes that were not yet \ +committed into git: bar @@ -218,6 +220,9 @@ fn publish_clean() { setup(); + let p = project("foo"); + p.build(); + repo(&paths::root().join("foo")) .file("Cargo.toml", r#" [project] @@ -233,7 +238,6 @@ .file("src/main.rs", "fn main() {}") .build(); - let p = project("foo"); assert_that(p.cargo("publish") .arg("--host").arg(registry().to_string()), execs().with_status(0)); @@ -243,6 +247,10 @@ fn publish_in_sub_repo() { setup(); + let p = project("foo") + .file("baz", ""); + p.build(); + repo(&paths::root().join("foo")) .file("bar/Cargo.toml", r#" [project] @@ -258,8 +266,6 @@ .file("bar/src/main.rs", "fn main() {}") .build(); - let p = 
project("foo"); - t!(File::create(p.root().join("baz"))); assert_that(p.cargo("publish").cwd(p.root().join("bar")) .arg("--host").arg(registry().to_string()), execs().with_status(0)); @@ -269,6 +275,10 @@ fn publish_when_ignored() { setup(); + let p = project("foo") + .file("baz", ""); + p.build(); + repo(&paths::root().join("foo")) .file("Cargo.toml", r#" [project] @@ -285,8 +295,6 @@ .file(".gitignore", "baz") .build(); - let p = project("foo"); - t!(File::create(p.root().join("baz"))); assert_that(p.cargo("publish") .arg("--host").arg(registry().to_string()), execs().with_status(0)); @@ -296,6 +304,10 @@ fn ignore_when_crate_ignored() { setup(); + let p = project("foo") + .file("bar/baz", ""); + p.build(); + repo(&paths::root().join("foo")) .file(".gitignore", "bar") .nocommit_file("bar/Cargo.toml", r#" @@ -310,8 +322,6 @@ repository = "foo" "#) .nocommit_file("bar/src/main.rs", "fn main() {}"); - let p = project("foo"); - t!(File::create(p.root().join("bar/baz"))); assert_that(p.cargo("publish").cwd(p.root().join("bar")) .arg("--host").arg(registry().to_string()), execs().with_status(0)); @@ -321,6 +331,10 @@ fn new_crate_rejected() { setup(); + let p = project("foo") + .file("baz", ""); + p.build(); + repo(&paths::root().join("foo")) .nocommit_file("Cargo.toml", r#" [project] @@ -334,8 +348,6 @@ repository = "foo" "#) .nocommit_file("src/main.rs", "fn main() {}"); - let p = project("foo"); - t!(File::create(p.root().join("baz"))); assert_that(p.cargo("publish") .arg("--host").arg(registry().to_string()), execs().with_status(101)); diff -Nru cargo-0.17.0/tests/read-manifest.rs cargo-0.19.0/tests/read-manifest.rs --- cargo-0.17.0/tests/read-manifest.rs 2017-03-03 19:21:26.000000000 +0000 +++ cargo-0.19.0/tests/read-manifest.rs 2017-05-16 03:23:10.000000000 +0000 @@ -11,10 +11,12 @@ "id":"foo[..]0.5.0[..](path+file://[..]/foo)", "license": null, "license_file": null, + "description": null, "source":null, "dependencies":[], "targets":[{ "kind":["bin"], + 
"crate_types":["bin"], "name":"foo", "src_path":"[..][/]foo[/]src[/]foo.rs" }], diff -Nru cargo-0.17.0/tests/registry.rs cargo-0.19.0/tests/registry.rs --- cargo-0.17.0/tests/registry.rs 2017-03-03 19:21:26.000000000 +0000 +++ cargo-0.19.0/tests/registry.rs 2017-05-16 03:23:10.000000000 +0000 @@ -31,10 +31,11 @@ bar = ">= 0.0.0" "#) .file("src/main.rs", "fn main() {}"); + p.build(); Package::new("bar", "0.0.1").publish(); - assert_that(p.cargo_process("build"), + assert_that(p.cargo("build"), execs().with_status(0).with_stderr(&format!("\ [UPDATING] registry `{reg}` [DOWNLOADING] bar v0.0.1 (registry file://[..]) @@ -45,16 +46,16 @@ dir = p.url(), reg = registry::registry()))); + assert_that(p.cargo("clean"), execs().with_status(0)); + // Don't download a second time - assert_that(p.cargo_process("build"), + assert_that(p.cargo("build"), execs().with_status(0).with_stderr(&format!("\ -[UPDATING] registry `{reg}` [COMPILING] bar v0.0.1 [COMPILING] foo v0.0.1 ({dir}) [FINISHED] dev [unoptimized + debuginfo] target(s) in [..] secs ", - dir = p.url(), - reg = registry::registry()))); + dir = p.url()))); } #[test] @@ -126,26 +127,25 @@ foo = ">= 1.0.0" "#) .file("src/main.rs", "fn main() {}"); + p.build(); Package::new("foo", "0.0.1").publish(); Package::new("foo", "0.0.2").publish(); - assert_that(p.cargo_process("build"), + assert_that(p.cargo("build"), execs().with_status(101).with_stderr_contains("\ -[ERROR] no matching package named `foo` found (required by `foo`) +[ERROR] no matching version `>= 1.0.0` found for package `foo` (required by `foo`) location searched: registry [..] 
-version required: >= 1.0.0 versions found: 0.0.2, 0.0.1 ")); Package::new("foo", "0.0.3").publish(); Package::new("foo", "0.0.4").publish(); - assert_that(p.cargo_process("build"), + assert_that(p.cargo("build"), execs().with_status(101).with_stderr_contains("\ -[ERROR] no matching package named `foo` found (required by `foo`) +[ERROR] no matching version `>= 1.0.0` found for package `foo` (required by `foo`) location searched: registry [..] -version required: >= 1.0.0 versions found: 0.0.4, 0.0.3, 0.0.2, ... ")); } @@ -397,9 +397,8 @@ assert_that(p.cargo("build"), execs().with_status(101).with_stderr_contains("\ -[ERROR] no matching package named `baz` found (required by `bar`) +[ERROR] no matching version `= 0.0.2` found for package `baz` (required by `bar`) location searched: registry [..] -version required: = 0.0.2 versions found: 0.0.1 ")); } @@ -1315,17 +1314,17 @@ p.build(); Package::new("remote", "0.3.0") - .file("Cargo.toml", r#" - [project] + .file("Cargo.toml", r#" + [project] name = "remote" version = "0.3.0" authors = [] [dependencies] bar = "0.2*" - "#) + "#) .file("src/lib.rs", "") - .publish(); + .publish(); Package::new("bar", "0.2.0").publish(); assert_that(p.cargo("build"), @@ -1354,17 +1353,17 @@ Package::new("foo", "0.2.0").publish(); Package::new("bar", "0.3.0") .dep("foo", "0.2.0") - .file("Cargo.toml", r#" - [project] + .file("Cargo.toml", r#" + [project] name = "bar" version = "0.3.0" authors = [] [dependencies] foo = "0.1.0" - "#) + "#) .file("src/lib.rs", "extern crate foo;") - .publish(); + .publish(); let p = project("foo") .file("Cargo.toml", r#" @@ -1382,3 +1381,30 @@ assert_that(p.cargo("build").arg("-v"), execs().with_status(0)); } + +#[test] +fn vv_prints_warnings() { + Package::new("foo", "0.2.0") + .file("src/lib.rs", r#" + #![deny(warnings)] + + fn foo() {} // unused function + "#) + .publish(); + + let p = project("foo") + .file("Cargo.toml", r#" + [project] + name = "fo" + version = "0.5.0" + authors = [] + + [dependencies] + 
foo = "0.2" + "#) + .file("src/main.rs", "fn main() {}"); + p.build(); + + assert_that(p.cargo("build").arg("-vv"), + execs().with_status(0)); +} diff -Nru cargo-0.17.0/tests/required-features.rs cargo-0.19.0/tests/required-features.rs --- cargo-0.17.0/tests/required-features.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/tests/required-features.rs 2017-05-16 03:23:10.000000000 +0000 @@ -0,0 +1,1106 @@ +extern crate cargotest; +extern crate hamcrest; + +use cargotest::is_nightly; +use cargotest::install::{cargo_home, has_installed_exe}; +use cargotest::support::{project, execs}; +use hamcrest::{assert_that, existing_file, not}; + +#[test] +fn build_bin_default_features() { + let p = project("foo") + .file("Cargo.toml", r#" + [project] + name = "foo" + version = "0.0.1" + authors = [] + + [features] + default = ["a"] + a = [] + + [[bin]] + name = "foo" + required-features = ["a"] + "#) + .file("src/main.rs", r#" + extern crate foo; + + #[cfg(feature = "a")] + fn test() { + foo::foo(); + } + + fn main() {} + "#) + .file("src/lib.rs", r#" + #[cfg(feature = "a")] + pub fn foo() {} + "#); + p.build(); + + assert_that(p.cargo("build"), + execs().with_status(0)); + assert_that(&p.bin("foo"), existing_file()); + + assert_that(p.cargo("build").arg("--no-default-features"), + execs().with_status(0)); + + assert_that(p.cargo("build").arg("--bin=foo"), + execs().with_status(0)); + assert_that(&p.bin("foo"), existing_file()); + + assert_that(p.cargo("build").arg("--bin=foo").arg("--no-default-features"), + execs().with_status(101).with_stderr("\ +error: target `foo` requires the features: `a` +Consider enabling them by passing e.g. 
`--features=\"a\"` +")); +} + +#[test] +fn build_bin_arg_features() { + let p = project("foo") + .file("Cargo.toml", r#" + [project] + name = "foo" + version = "0.0.1" + authors = [] + + [features] + a = [] + + [[bin]] + name = "foo" + required-features = ["a"] + "#) + .file("src/main.rs", "fn main() {}"); + p.build(); + + assert_that(p.cargo("build").arg("--features").arg("a"), + execs().with_status(0)); + assert_that(&p.bin("foo"), existing_file()); +} + +#[test] +fn build_bin_multiple_required_features() { + let p = project("foo") + .file("Cargo.toml", r#" + [project] + name = "foo" + version = "0.0.1" + authors = [] + + [features] + default = ["a", "b"] + a = [] + b = ["a"] + c = [] + + [[bin]] + name = "foo_1" + path = "src/foo_1.rs" + required-features = ["b", "c"] + + [[bin]] + name = "foo_2" + path = "src/foo_2.rs" + required-features = ["a"] + "#) + .file("src/foo_1.rs", "fn main() {}") + .file("src/foo_2.rs", "fn main() {}"); + p.build(); + + assert_that(p.cargo("build"), + execs().with_status(0)); + + assert_that(&p.bin("foo_1"), not(existing_file())); + assert_that(&p.bin("foo_2"), existing_file()); + + assert_that(p.cargo("build").arg("--features").arg("c"), + execs().with_status(0)); + + assert_that(&p.bin("foo_1"), existing_file()); + assert_that(&p.bin("foo_2"), existing_file()); + + assert_that(p.cargo("build").arg("--no-default-features"), + execs().with_status(0)); +} + +#[test] +fn build_example_default_features() { + let p = project("foo") + .file("Cargo.toml", r#" + [project] + name = "foo" + version = "0.0.1" + authors = [] + + [features] + default = ["a"] + a = [] + + [[example]] + name = "foo" + required-features = ["a"] + "#) + .file("examples/foo.rs", "fn main() {}"); + p.build(); + + assert_that(p.cargo("build").arg("--example=foo"), + execs().with_status(0)); + assert_that(&p.bin("examples/foo"), existing_file()); + + assert_that(p.cargo("build").arg("--example=foo").arg("--no-default-features"), + 
execs().with_status(101).with_stderr("\ +error: target `foo` requires the features: `a` +Consider enabling them by passing e.g. `--features=\"a\"` +")); +} + +#[test] +fn build_example_arg_features() { + let p = project("foo") + .file("Cargo.toml", r#" + [project] + name = "foo" + version = "0.0.1" + authors = [] + + [features] + a = [] + + [[example]] + name = "foo" + required-features = ["a"] + "#) + .file("examples/foo.rs", "fn main() {}"); + p.build(); + + assert_that(p.cargo("build").arg("--example=foo").arg("--features").arg("a"), + execs().with_status(0)); + assert_that(&p.bin("examples/foo"), existing_file()); +} + +#[test] +fn build_example_multiple_required_features() { + let p = project("foo") + .file("Cargo.toml", r#" + [project] + name = "foo" + version = "0.0.1" + authors = [] + + [features] + default = ["a", "b"] + a = [] + b = ["a"] + c = [] + + [[example]] + name = "foo_1" + required-features = ["b", "c"] + + [[example]] + name = "foo_2" + required-features = ["a"] + "#) + .file("examples/foo_1.rs", "fn main() {}") + .file("examples/foo_2.rs", "fn main() {}"); + p.build(); + + assert_that(p.cargo("build").arg("--example=foo_1"), + execs().with_status(101).with_stderr("\ +error: target `foo_1` requires the features: `b`, `c` +Consider enabling them by passing e.g. 
`--features=\"b c\"` +")); + assert_that(p.cargo("build").arg("--example=foo_2"), + execs().with_status(0)); + + assert_that(&p.bin("examples/foo_1"), not(existing_file())); + assert_that(&p.bin("examples/foo_2"), existing_file()); + + assert_that(p.cargo("build").arg("--example=foo_1") + .arg("--features").arg("c"), + execs().with_status(0)); + assert_that(p.cargo("build").arg("--example=foo_2") + .arg("--features").arg("c"), + execs().with_status(0)); + + assert_that(&p.bin("examples/foo_1"), existing_file()); + assert_that(&p.bin("examples/foo_2"), existing_file()); + + assert_that(p.cargo("build").arg("--example=foo_1") + .arg("--no-default-features"), + execs().with_status(101).with_stderr("\ +error: target `foo_1` requires the features: `b`, `c` +Consider enabling them by passing e.g. `--features=\"b c\"` +")); + assert_that(p.cargo("build").arg("--example=foo_2") + .arg("--no-default-features"), + execs().with_status(101).with_stderr("\ +error: target `foo_2` requires the features: `a` +Consider enabling them by passing e.g. `--features=\"a\"` +")); +} + +#[test] +fn test_default_features() { + let p = project("foo") + .file("Cargo.toml", r#" + [project] + name = "foo" + version = "0.0.1" + authors = [] + + [features] + default = ["a"] + a = [] + + [[test]] + name = "foo" + required-features = ["a"] + "#) + .file("tests/foo.rs", "#[test]\nfn test() {}"); + p.build(); + + assert_that(p.cargo("test"), + execs().with_status(0).with_stderr(format!("\ +[COMPILING] foo v0.0.1 ({}) +[FINISHED] dev [unoptimized + debuginfo] target(s) in [..] +[RUNNING] target[/]debug[/]deps[/]foo-[..][EXE]", p.url())) + .with_stdout(" +running 1 test +test test ... ok + +test result: ok. 
1 passed; 0 failed; 0 ignored; 0 measured + +")); + + assert_that(p.cargo("test").arg("--no-default-features"), + execs().with_status(0).with_stderr(format!("\ +[FINISHED] dev [unoptimized + debuginfo] target(s) in [..]")) + .with_stdout("")); + + assert_that(p.cargo("test").arg("--test=foo"), + execs().with_status(0).with_stderr(format!("\ +[FINISHED] dev [unoptimized + debuginfo] target(s) in [..] +[RUNNING] target[/]debug[/]deps[/]foo-[..][EXE]")) + .with_stdout(" +running 1 test +test test ... ok + +test result: ok. 1 passed; 0 failed; 0 ignored; 0 measured + +")); + + assert_that(p.cargo("test").arg("--test=foo").arg("--no-default-features"), + execs().with_status(101).with_stderr("\ +error: target `foo` requires the features: `a` +Consider enabling them by passing e.g. `--features=\"a\"` +")); +} + +#[test] +fn test_arg_features() { + let p = project("foo") + .file("Cargo.toml", r#" + [project] + name = "foo" + version = "0.0.1" + authors = [] + + [features] + a = [] + + [[test]] + name = "foo" + required-features = ["a"] + "#) + .file("tests/foo.rs", "#[test]\nfn test() {}"); + p.build(); + + assert_that(p.cargo("test").arg("--features").arg("a"), + execs().with_status(0).with_stderr(format!("\ +[COMPILING] foo v0.0.1 ({}) +[FINISHED] dev [unoptimized + debuginfo] target(s) in [..] +[RUNNING] target[/]debug[/]deps[/]foo-[..][EXE]", p.url())) + .with_stdout(" +running 1 test +test test ... ok + +test result: ok. 
1 passed; 0 failed; 0 ignored; 0 measured + +")); +} + +#[test] +fn test_multiple_required_features() { + let p = project("foo") + .file("Cargo.toml", r#" + [project] + name = "foo" + version = "0.0.1" + authors = [] + + [features] + default = ["a", "b"] + a = [] + b = ["a"] + c = [] + + [[test]] + name = "foo_1" + required-features = ["b", "c"] + + [[test]] + name = "foo_2" + required-features = ["a"] + "#) + .file("tests/foo_1.rs", "#[test]\nfn test() {}") + .file("tests/foo_2.rs", "#[test]\nfn test() {}"); + p.build(); + + assert_that(p.cargo("test"), + execs().with_status(0).with_stderr(format!("\ +[COMPILING] foo v0.0.1 ({}) +[FINISHED] dev [unoptimized + debuginfo] target(s) in [..] +[RUNNING] target[/]debug[/]deps[/]foo_2-[..][EXE]", p.url())) + .with_stdout(" +running 1 test +test test ... ok + +test result: ok. 1 passed; 0 failed; 0 ignored; 0 measured + +")); + + assert_that(p.cargo("test").arg("--features").arg("c"), + execs().with_status(0).with_stderr(format!("\ +[COMPILING] foo v0.0.1 ({}) +[FINISHED] dev [unoptimized + debuginfo] target(s) in [..] +[RUNNING] target[/]debug[/]deps[/]foo_1-[..][EXE] +[RUNNING] target[/]debug[/]deps[/]foo_2-[..][EXE]", p.url())) + .with_stdout(" +running 1 test +test test ... ok + +test result: ok. 1 passed; 0 failed; 0 ignored; 0 measured + + +running 1 test +test test ... ok + +test result: ok. 
1 passed; 0 failed; 0 ignored; 0 measured + +")); + + assert_that(p.cargo("test").arg("--no-default-features"), + execs().with_status(0).with_stderr(format!("\ +[FINISHED] dev [unoptimized + debuginfo] target(s) in [..]")) + .with_stdout("")); +} + +#[test] +fn bench_default_features() { + if !is_nightly() { + return; + } + + let p = project("foo") + .file("Cargo.toml", r#" + [project] + name = "foo" + version = "0.0.1" + authors = [] + + [features] + default = ["a"] + a = [] + + [[bench]] + name = "foo" + required-features = ["a"] + "#) + .file("benches/foo.rs", r#" + #![feature(test)] + extern crate test; + + #[bench] + fn bench(_: &mut test::Bencher) { + }"#); + p.build(); + + assert_that(p.cargo("bench"), + execs().with_status(0).with_stderr(format!("\ +[COMPILING] foo v0.0.1 ({}) +[FINISHED] release [optimized] target(s) in [..] +[RUNNING] target[/]release[/]deps[/]foo-[..][EXE]", p.url())) + .with_stdout(" +running 1 test +test bench ... bench: [..] 0 ns/iter (+/- 0) + +test result: ok. 0 passed; 0 failed; 0 ignored; 1 measured + +")); + + assert_that(p.cargo("bench").arg("--no-default-features"), + execs().with_status(0).with_stderr(format!("\ +[FINISHED] release [optimized] target(s) in [..]")) + .with_stdout("")); + + assert_that(p.cargo("bench").arg("--bench=foo"), + execs().with_status(0).with_stderr(format!("\ +[FINISHED] release [optimized] target(s) in [..] +[RUNNING] target[/]release[/]deps[/]foo-[..][EXE]")) + .with_stdout(" +running 1 test +test bench ... bench: [..] 0 ns/iter (+/- 0) + +test result: ok. 0 passed; 0 failed; 0 ignored; 1 measured + +")); + + assert_that(p.cargo("bench").arg("--bench=foo").arg("--no-default-features"), + execs().with_status(101).with_stderr("\ +error: target `foo` requires the features: `a` +Consider enabling them by passing e.g. 
`--features=\"a\"` +")); +} + +#[test] +fn bench_arg_features() { + if !is_nightly() { + return; + } + + let p = project("foo") + .file("Cargo.toml", r#" + [project] + name = "foo" + version = "0.0.1" + authors = [] + + [features] + a = [] + + [[bench]] + name = "foo" + required-features = ["a"] + "#) + .file("benches/foo.rs", r#" + #![feature(test)] + extern crate test; + + #[bench] + fn bench(_: &mut test::Bencher) { + }"#); + p.build(); + + assert_that(p.cargo("bench").arg("--features").arg("a"), + execs().with_status(0).with_stderr(format!("\ +[COMPILING] foo v0.0.1 ({}) +[FINISHED] release [optimized] target(s) in [..] +[RUNNING] target[/]release[/]deps[/]foo-[..][EXE]", p.url())) + .with_stdout(" +running 1 test +test bench ... bench: [..] 0 ns/iter (+/- 0) + +test result: ok. 0 passed; 0 failed; 0 ignored; 1 measured + +")); +} + +#[test] +fn bench_multiple_required_features() { + if !is_nightly() { + return; + } + + let p = project("foo") + .file("Cargo.toml", r#" + [project] + name = "foo" + version = "0.0.1" + authors = [] + + [features] + default = ["a", "b"] + a = [] + b = ["a"] + c = [] + + [[bench]] + name = "foo_1" + required-features = ["b", "c"] + + [[bench]] + name = "foo_2" + required-features = ["a"] + "#) + .file("benches/foo_1.rs", r#" + #![feature(test)] + extern crate test; + + #[bench] + fn bench(_: &mut test::Bencher) { + }"#) + .file("benches/foo_2.rs", r#" + #![feature(test)] + extern crate test; + + #[bench] + fn bench(_: &mut test::Bencher) { + }"#); + p.build(); + + assert_that(p.cargo("bench"), + execs().with_status(0).with_stderr(format!("\ +[COMPILING] foo v0.0.1 ({}) +[FINISHED] release [optimized] target(s) in [..] +[RUNNING] target[/]release[/]deps[/]foo_2-[..][EXE]", p.url())) + .with_stdout(" +running 1 test +test bench ... bench: [..] 0 ns/iter (+/- 0) + +test result: ok. 
0 passed; 0 failed; 0 ignored; 1 measured + +")); + + assert_that(p.cargo("bench").arg("--features").arg("c"), + execs().with_status(0).with_stderr(format!("\ +[COMPILING] foo v0.0.1 ({}) +[FINISHED] release [optimized] target(s) in [..] +[RUNNING] target[/]release[/]deps[/]foo_1-[..][EXE] +[RUNNING] target[/]release[/]deps[/]foo_2-[..][EXE]", p.url())) + .with_stdout(" +running 1 test +test bench ... bench: [..] 0 ns/iter (+/- 0) + +test result: ok. 0 passed; 0 failed; 0 ignored; 1 measured + + +running 1 test +test bench ... bench: [..] 0 ns/iter (+/- 0) + +test result: ok. 0 passed; 0 failed; 0 ignored; 1 measured + +")); + + assert_that(p.cargo("bench").arg("--no-default-features"), + execs().with_status(0).with_stderr(format!("\ +[FINISHED] release [optimized] target(s) in [..]")) + .with_stdout("")); +} + +#[test] +fn install_default_features() { + let p = project("foo") + .file("Cargo.toml", r#" + [project] + name = "foo" + version = "0.0.1" + authors = [] + + [features] + default = ["a"] + a = [] + + [[bin]] + name = "foo" + required-features = ["a"] + + [[example]] + name = "foo" + required-features = ["a"] + "#) + .file("src/main.rs", "fn main() {}") + .file("examples/foo.rs", "fn main() {}"); + p.build(); + + assert_that(p.cargo("install"), + execs().with_status(0)); + assert_that(cargo_home(), has_installed_exe("foo")); + assert_that(p.cargo("uninstall").arg("foo"), + execs().with_status(0)); + + assert_that(p.cargo("install").arg("--no-default-features"), + execs().with_status(101).with_stderr(format!("\ +[INSTALLING] foo v0.0.1 ([..]) +[FINISHED] release [optimized] target(s) in [..] 
+[ERROR] no binaries are available for install using the selected features +"))); + assert_that(cargo_home(), not(has_installed_exe("foo"))); + + assert_that(p.cargo("install").arg("--bin=foo"), + execs().with_status(0)); + assert_that(cargo_home(), has_installed_exe("foo")); + assert_that(p.cargo("uninstall").arg("foo"), + execs().with_status(0)); + + assert_that(p.cargo("install").arg("--bin=foo").arg("--no-default-features"), + execs().with_status(101).with_stderr(format!("\ +[INSTALLING] foo v0.0.1 ([..]) +[ERROR] failed to compile `foo v0.0.1 ([..])`, intermediate artifacts can be found at \ + `[..]target` + +Caused by: + target `foo` requires the features: `a` +Consider enabling them by passing e.g. `--features=\"a\"` +"))); + assert_that(cargo_home(), not(has_installed_exe("foo"))); + + assert_that(p.cargo("install").arg("--example=foo"), + execs().with_status(0)); + assert_that(cargo_home(), has_installed_exe("foo")); + assert_that(p.cargo("uninstall").arg("foo"), + execs().with_status(0)); + + assert_that(p.cargo("install").arg("--example=foo").arg("--no-default-features"), + execs().with_status(101).with_stderr(format!("\ +[INSTALLING] foo v0.0.1 ([..]) +[ERROR] failed to compile `foo v0.0.1 ([..])`, intermediate artifacts can be found at \ + `[..]target` + +Caused by: + target `foo` requires the features: `a` +Consider enabling them by passing e.g. 
`--features=\"a\"` +"))); + assert_that(cargo_home(), not(has_installed_exe("foo"))); +} + +#[test] +fn install_arg_features() { + let p = project("foo") + .file("Cargo.toml", r#" + [project] + name = "foo" + version = "0.0.1" + authors = [] + + [features] + a = [] + + [[bin]] + name = "foo" + required-features = ["a"] + "#) + .file("src/main.rs", "fn main() {}"); + p.build(); + + assert_that(p.cargo("install").arg("--features").arg("a"), + execs().with_status(0)); + assert_that(cargo_home(), has_installed_exe("foo")); + assert_that(p.cargo("uninstall").arg("foo"), + execs().with_status(0)); +} + +#[test] +fn install_multiple_required_features() { + let p = project("foo") + .file("Cargo.toml", r#" + [project] + name = "foo" + version = "0.0.1" + authors = [] + + [features] + default = ["a", "b"] + a = [] + b = ["a"] + c = [] + + [[bin]] + name = "foo_1" + path = "src/foo_1.rs" + required-features = ["b", "c"] + + [[bin]] + name = "foo_2" + path = "src/foo_2.rs" + required-features = ["a"] + "#) + .file("src/foo_1.rs", "fn main() {}") + .file("src/foo_2.rs", "fn main() {}"); + p.build(); + + assert_that(p.cargo("install"), + execs().with_status(0)); + assert_that(cargo_home(), not(has_installed_exe("foo_1"))); + assert_that(cargo_home(), has_installed_exe("foo_2")); + assert_that(p.cargo("uninstall").arg("foo"), + execs().with_status(0)); + + assert_that(p.cargo("install").arg("--features").arg("c"), + execs().with_status(0)); + assert_that(cargo_home(), has_installed_exe("foo_1")); + assert_that(cargo_home(), has_installed_exe("foo_2")); + assert_that(p.cargo("uninstall").arg("foo"), + execs().with_status(0)); + + assert_that(p.cargo("install").arg("--no-default-features"), + execs().with_status(101).with_stderr("\ +[INSTALLING] foo v0.0.1 ([..]) +[FINISHED] release [optimized] target(s) in [..] 
+[ERROR] no binaries are available for install using the selected features +")); + assert_that(cargo_home(), not(has_installed_exe("foo_1"))); + assert_that(cargo_home(), not(has_installed_exe("foo_2"))); +} + +#[test] +fn dep_feature_in_toml() { + let p = project("foo") + .file("Cargo.toml", r#" + [project] + name = "foo" + version = "0.0.1" + authors = [] + + [dependencies] + bar = { path = "bar", features = ["a"] } + + [[bin]] + name = "foo" + required-features = ["bar/a"] + + [[example]] + name = "foo" + required-features = ["bar/a"] + + [[test]] + name = "foo" + required-features = ["bar/a"] + + [[bench]] + name = "foo" + required-features = ["bar/a"] + "#) + .file("src/main.rs", "fn main() {}") + .file("examples/foo.rs", "fn main() {}") + .file("tests/foo.rs", "#[test]\nfn test() {}") + .file("benches/foo.rs", r#" + #![feature(test)] + extern crate test; + + #[bench] + fn bench(_: &mut test::Bencher) { + }"#) + .file("bar/Cargo.toml", r#" + [project] + name = "bar" + version = "0.0.1" + authors = [] + + [features] + a = [] + "#) + .file("bar/src/lib.rs", ""); + p.build(); + + assert_that(p.cargo("build"), + execs().with_status(0)); + + // bin + assert_that(p.cargo("build").arg("--bin=foo"), + execs().with_status(0)); + assert_that(&p.bin("foo"), existing_file()); + + // example + assert_that(p.cargo("build").arg("--example=foo"), + execs().with_status(0)); + assert_that(&p.bin("examples/foo"), existing_file()); + + // test + assert_that(p.cargo("test").arg("--test=foo"), + execs().with_status(0).with_stderr(format!("\ +[COMPILING] foo v0.0.1 ({}) +[FINISHED] dev [unoptimized + debuginfo] target(s) in [..] +[RUNNING] target[/]debug[/]deps[/]foo-[..][EXE]", p.url())) + .with_stdout(" +running 1 test +test test ... ok + +test result: ok. 
1 passed; 0 failed; 0 ignored; 0 measured + +")); + + // bench + if is_nightly() { + assert_that(p.cargo("bench").arg("--bench=foo"), + execs().with_status(0).with_stderr(format!("\ +[COMPILING] bar v0.0.1 ({0}/bar) +[COMPILING] foo v0.0.1 ({0}) +[FINISHED] release [optimized] target(s) in [..] +[RUNNING] target[/]release[/]deps[/]foo-[..][EXE]", p.url())) + .with_stdout(" +running 1 test +test bench ... bench: [..] 0 ns/iter (+/- 0) + +test result: ok. 0 passed; 0 failed; 0 ignored; 1 measured + +")); + } + + // install + assert_that(p.cargo("install"), + execs().with_status(0)); + assert_that(cargo_home(), has_installed_exe("foo")); + assert_that(p.cargo("uninstall").arg("foo"), + execs().with_status(0)); +} + +#[test] +fn dep_feature_in_cmd_line() { + let p = project("foo") + .file("Cargo.toml", r#" + [project] + name = "foo" + version = "0.0.1" + authors = [] + + [dependencies] + bar = { path = "bar" } + + [[bin]] + name = "foo" + required-features = ["bar/a"] + + [[example]] + name = "foo" + required-features = ["bar/a"] + + [[test]] + name = "foo" + required-features = ["bar/a"] + + [[bench]] + name = "foo" + required-features = ["bar/a"] + "#) + .file("src/main.rs", "fn main() {}") + .file("examples/foo.rs", "fn main() {}") + .file("tests/foo.rs", "#[test]\nfn test() {}") + .file("benches/foo.rs", r#" + #![feature(test)] + extern crate test; + + #[bench] + fn bench(_: &mut test::Bencher) { + }"#) + .file("bar/Cargo.toml", r#" + [project] + name = "bar" + version = "0.0.1" + authors = [] + + [features] + a = [] + "#) + .file("bar/src/lib.rs", ""); + p.build(); + + assert_that(p.cargo("build"), + execs().with_status(0)); + + // bin + assert_that(p.cargo("build").arg("--bin=foo"), + execs().with_status(101).with_stderr("\ +error: target `foo` requires the features: `bar/a` +Consider enabling them by passing e.g. 
`--features=\"bar/a\"` +")); + + assert_that(p.cargo("build").arg("--bin=foo").arg("--features").arg("bar/a"), + execs().with_status(0)); + assert_that(&p.bin("foo"), existing_file()); + + // example + assert_that(p.cargo("build").arg("--example=foo"), + execs().with_status(101).with_stderr("\ +error: target `foo` requires the features: `bar/a` +Consider enabling them by passing e.g. `--features=\"bar/a\"` +")); + + assert_that(p.cargo("build").arg("--example=foo").arg("--features").arg("bar/a"), + execs().with_status(0)); + assert_that(&p.bin("examples/foo"), existing_file()); + + // test + assert_that(p.cargo("test"), + execs().with_status(0).with_stderr(format!("\ +[FINISHED] dev [unoptimized + debuginfo] target(s) in [..]")) + .with_stdout("")); + + assert_that(p.cargo("test").arg("--test=foo").arg("--features").arg("bar/a"), + execs().with_status(0).with_stderr(format!("\ +[COMPILING] foo v0.0.1 ({}) +[FINISHED] dev [unoptimized + debuginfo] target(s) in [..] +[RUNNING] target[/]debug[/]deps[/]foo-[..][EXE]", p.url())) + .with_stdout(" +running 1 test +test test ... ok + +test result: ok. 1 passed; 0 failed; 0 ignored; 0 measured + +")); + + // bench + if is_nightly() { + assert_that(p.cargo("bench"), + execs().with_status(0).with_stderr(format!("\ +[FINISHED] release [optimized] target(s) in [..]")) + .with_stdout("")); + + assert_that(p.cargo("bench").arg("--bench=foo").arg("--features").arg("bar/a"), + execs().with_status(0).with_stderr(format!("\ +[COMPILING] bar v0.0.1 ({0}/bar) +[COMPILING] foo v0.0.1 ({0}) +[FINISHED] release [optimized] target(s) in [..] +[RUNNING] target[/]release[/]deps[/]foo-[..][EXE]", p.url())) + .with_stdout(" +running 1 test +test bench ... bench: [..] 0 ns/iter (+/- 0) + +test result: ok. 0 passed; 0 failed; 0 ignored; 1 measured + +")); + } + + // install + assert_that(p.cargo("install"), + execs().with_status(101).with_stderr(format!("\ +[INSTALLING] foo v0.0.1 ([..]) +[FINISHED] release [optimized] target(s) in [..] 
+[ERROR] no binaries are available for install using the selected features +"))); + assert_that(cargo_home(), not(has_installed_exe("foo"))); + + assert_that(p.cargo("install").arg("--features").arg("bar/a"), + execs().with_status(0)); + assert_that(cargo_home(), has_installed_exe("foo")); + assert_that(p.cargo("uninstall").arg("foo"), + execs().with_status(0)); +} + +#[test] +fn test_skips_compiling_bin_with_missing_required_features() { + let p = project("foo") + .file("Cargo.toml", r#" + [project] + name = "foo" + version = "0.0.1" + authors = [] + + [features] + a = [] + + [[bin]] + name = "bin_foo" + path = "src/bin/foo.rs" + required-features = ["a"] + "#) + .file("src/bin/foo.rs", "extern crate bar; fn main() {}") + .file("tests/foo.rs", "") + .file("benches/foo.rs", ""); + p.build(); + + assert_that(p.cargo("test"), + execs().with_status(0).with_stderr(format!("\ +[COMPILING] foo v0.0.1 ({}) +[FINISHED] dev [unoptimized + debuginfo] target(s) in [..] +[RUNNING] target[/]debug[/]deps[/]foo-[..][EXE]", p.url())) + .with_stdout(" +running 0 tests + +test result: ok. 0 passed; 0 failed; 0 ignored; 0 measured + +")); + + assert_that(p.cargo("test").arg("--features").arg("a").arg("-j").arg("1"), + execs().with_status(101).with_stderr_contains(format!("\ +[COMPILING] foo v0.0.1 ({}) +error[E0463]: can't find crate for `bar`", p.url()))); + + if is_nightly() { + assert_that(p.cargo("bench"), + execs().with_status(0).with_stderr(format!("\ +[COMPILING] foo v0.0.1 ({}) +[FINISHED] release [optimized] target(s) in [..] +[RUNNING] target[/]release[/]deps[/]foo-[..][EXE]", p.url())) + .with_stdout(" +running 0 tests + +test result: ok. 
0 passed; 0 failed; 0 ignored; 0 measured + +")); + + assert_that(p.cargo("bench").arg("--features").arg("a").arg("-j").arg("1"), + execs().with_status(101).with_stderr_contains(format!("\ +[COMPILING] foo v0.0.1 ({}) +error[E0463]: can't find crate for `bar`", p.url()))); + } +} + +#[test] +fn run_default() { + let p = project("foo") + .file("Cargo.toml", r#" + [project] + name = "foo" + version = "0.0.1" + authors = [] + + [features] + default = [] + a = [] + + [[bin]] + name = "foo" + required-features = ["a"] + "#) + .file("src/lib.rs", "") + .file("src/main.rs", "extern crate foo; fn main() {}"); + p.build(); + + assert_that(p.cargo("run"), + execs().with_status(101).with_stderr("\ +error: target `foo` requires the features: `a` +Consider enabling them by passing e.g. `--features=\"a\"` +")); + + assert_that(p.cargo("run").arg("--features").arg("a"), + execs().with_status(0)); +} + +#[test] +fn run_default_multiple_required_features() { + let p = project("foo") + .file("Cargo.toml", r#" + [project] + name = "foo" + version = "0.0.1" + authors = [] + + [features] + default = ["a"] + a = [] + b = [] + + [[bin]] + name = "foo1" + path = "src/foo1.rs" + required-features = ["a"] + + [[bin]] + name = "foo2" + path = "src/foo2.rs" + required-features = ["b"] + "#) + .file("src/lib.rs", "") + .file("src/foo1.rs", "extern crate foo; fn main() {}") + .file("src/foo2.rs", "extern crate foo; fn main() {}"); + p.build(); + + assert_that(p.cargo("run"), + execs().with_status(101).with_stderr("\ +error: `cargo run` requires that a project only have one executable; \ +use the `--bin` option to specify which one to run")); +} \ No newline at end of file diff -Nru cargo-0.17.0/tests/resolve.rs cargo-0.19.0/tests/resolve.rs --- cargo-0.17.0/tests/resolve.rs 2017-03-03 19:21:26.000000000 +0000 +++ cargo-0.19.0/tests/resolve.rs 2017-05-16 03:23:10.000000000 +0000 @@ -18,9 +18,7 @@ -> CargoResult> { let summary = Summary::new(pkg.clone(), deps, HashMap::new()).unwrap(); let method 
= Method::Everything; - Ok(resolver::resolve(&[(summary, method)], &[], registry)?.iter().map(|p| { - p.clone() - }).collect()) + Ok(resolver::resolve(&[(summary, method)], &[], registry)?.iter().cloned().collect()) } trait ToDep { diff -Nru cargo-0.17.0/tests/run.rs cargo-0.19.0/tests/run.rs --- cargo-0.17.0/tests/run.rs 2017-03-03 19:21:26.000000000 +0000 +++ cargo-0.19.0/tests/run.rs 2017-05-16 03:23:10.000000000 +0000 @@ -292,22 +292,23 @@ .file("examples/a.rs", r#" fn main() { println!("example"); } "#); + p.build(); - assert_that(p.cargo_process("run").arg("--bin").arg("bin.rs"), + assert_that(p.cargo("run").arg("--bin").arg("bin.rs"), execs().with_status(101).with_stderr("\ [ERROR] no bin target named `bin.rs`")); - assert_that(p.cargo_process("run").arg("--bin").arg("a.rs"), + assert_that(p.cargo("run").arg("--bin").arg("a.rs"), execs().with_status(101).with_stderr("\ [ERROR] no bin target named `a.rs` Did you mean `a`?")); - assert_that(p.cargo_process("run").arg("--example").arg("example.rs"), + assert_that(p.cargo("run").arg("--example").arg("example.rs"), execs().with_status(101).with_stderr("\ [ERROR] no example target named `example.rs`")); - assert_that(p.cargo_process("run").arg("--example").arg("a.rs"), + assert_that(p.cargo("run").arg("--example").arg("a.rs"), execs().with_status(101).with_stderr("\ [ERROR] no example target named `a.rs` @@ -597,30 +598,37 @@ #[test] fn run_with_library_paths() { - let p = project("foo") - .file("Cargo.toml", r#" + let mut p = project("foo"); + + // Only link search directories within the target output directory are + // propagated through to dylib_path_envvar() (see #3366). 
+ let mut dir1 = p.target_debug_dir(); + dir1.push("foo\\backslash"); + + let mut dir2 = p.target_debug_dir(); + dir2.push("dir=containing=equal=signs"); + + p = p.file("Cargo.toml", r#" [project] name = "foo" version = "0.0.1" authors = [] build = "build.rs" "#) - .file("build.rs", r#" - fn main() { - println!("cargo:rustc-link-search=native=foo"); - println!("cargo:rustc-link-search=bar"); - println!("cargo:rustc-link-search=/path=containing=equal=signs"); - } - "#) - .file("src/main.rs", &format!(r#" + .file("build.rs", &format!(r##" + fn main() {{ + println!(r#"cargo:rustc-link-search=native={}"#); + println!(r#"cargo:rustc-link-search={}"#); + }} + "##, dir1.display(), dir2.display())) + .file("src/main.rs", &format!(r##" fn main() {{ let search_path = std::env::var_os("{}").unwrap(); let paths = std::env::split_paths(&search_path).collect::>(); - assert!(paths.contains(&"foo".into())); - assert!(paths.contains(&"bar".into())); - assert!(paths.contains(&"/path=containing=equal=signs".into())); + assert!(paths.contains(&r#"{}"#.into())); + assert!(paths.contains(&r#"{}"#.into())); }} - "#, dylib_path_envvar())); + "##, dylib_path_envvar(), dir1.display(), dir2.display())); assert_that(p.cargo_process("run"), execs().with_status(0)); } @@ -645,3 +653,80 @@ .with_stdout("") .with_stderr("")); } + +#[test] +fn run_multiple_packages() { + let p = project("foo") + .file("foo/Cargo.toml", r#" + [package] + name = "foo" + version = "0.0.1" + authors = [] + + [workspace] + + [dependencies] + d1 = { path = "d1" } + d2 = { path = "d2" } + d3 = { path = "../d3" } # outside of the workspace + + [[bin]] + name = "foo" + "#) + .file("foo/src/foo.rs", "fn main() { println!(\"foo\"); }") + .file("foo/d1/Cargo.toml", r#" + [package] + name = "d1" + version = "0.0.1" + authors = [] + + [[bin]] + name = "d1" + "#) + .file("foo/d1/src/lib.rs", "") + .file("foo/d1/src/main.rs", "fn main() { println!(\"d1\"); }") + .file("foo/d2/Cargo.toml", r#" + [package] + name = "d2" + version = 
"0.0.1" + authors = [] + + [[bin]] + name = "d2" + "#) + .file("foo/d2/src/main.rs", "fn main() { println!(\"d2\"); }") + .file("d3/Cargo.toml", r#" + [package] + name = "d3" + version = "0.0.1" + authors = [] + "#) + .file("d3/src/main.rs", "fn main() { println!(\"d2\"); }"); + + let p = p.build(); + + let cargo = || { + let mut process_builder = p.cargo("run"); + process_builder.cwd(p.root().join("foo")); + process_builder + }; + + assert_that(cargo().arg("-p").arg("d1"), + execs().with_status(0).with_stdout("d1")); + + assert_that(cargo().arg("-p").arg("d2").arg("--bin").arg("d2"), + execs().with_status(0).with_stdout("d2")); + + assert_that(cargo(), + execs().with_status(0).with_stdout("foo")); + + assert_that(cargo().arg("-p").arg("d1").arg("-p").arg("d2"), + execs() + .with_status(1) + .with_stderr_contains("[ERROR] Invalid arguments.")); + + assert_that(cargo().arg("-p").arg("d3"), + execs() + .with_status(101) + .with_stderr_contains("[ERROR] package `d3` is not a member of the workspace")); +} diff -Nru cargo-0.17.0/tests/rustflags.rs cargo-0.19.0/tests/rustflags.rs --- cargo-0.17.0/tests/rustflags.rs 2017-03-03 19:21:26.000000000 +0000 +++ cargo-0.19.0/tests/rustflags.rs 2017-05-16 03:23:10.000000000 +0000 @@ -189,7 +189,7 @@ #[bench] fn run1(_ben: &mut test::Bencher) { }"#); p.build(); - let ref host = rustc_host(); + let host = &rustc_host(); // Use RUSTFLAGS to pass an argument that will generate an error assert_that(p.cargo("build").env("RUSTFLAGS", "-Z bogus") @@ -951,6 +951,79 @@ } #[test] +fn cfg_rustflags_normal_source() { + let p = project("foo") + .file("Cargo.toml", r#" + [package] + name = "foo" + version = "0.0.1" + "#) + .file("src/lib.rs", "") + .file("src/bin/a.rs", "fn main() {}") + .file("examples/b.rs", "fn main() {}") + .file("tests/c.rs", "#[test] fn f() { }") + .file("benches/d.rs", r#" + #![feature(test)] + extern crate test; + #[bench] fn run1(_ben: &mut test::Bencher) { }"#) + .file(".cargo/config", " + 
[target.'cfg(feature=\"feat\")'] + rustflags = [\"-Z\", \"bogus\"] + "); + p.build(); + + assert_that(p.cargo("build").arg("--features").arg("\"feat\"") + .arg("--lib"), + execs().with_status(101)); + assert_that(p.cargo("build").arg("--features").arg("\"feat\"") + .arg("--bin=a"), + execs().with_status(101)); + assert_that(p.cargo("build").arg("--features").arg("\"feat\"") + .arg("--example=b"), + execs().with_status(101)); + assert_that(p.cargo("test").arg("--features").arg("\"feat\""), + execs().with_status(101)); + assert_that(p.cargo("bench").arg("--features").arg("\"feat\""), + execs().with_status(101)); +} + +// target.'cfg(...)'.rustflags takes precedence over build.rustflags +#[test] +fn cfg_rustflags_precedence() { + let p = project("foo") + .file("Cargo.toml", r#" + [package] + name = "foo" + version = "0.0.1" + "#) + .file("src/lib.rs", "") + .file(".cargo/config", " + [build] + rustflags = [\"--cfg\", \"foo\"] + + [target.'cfg(feature = \"feat\"')] + rustflags = [\"-Z\", \"bogus\"] + "); + p.build(); + + assert_that(p.cargo("build").arg("--features").arg("\"feat\"") + .arg("--lib"), + execs().with_status(101)); + assert_that(p.cargo("build").arg("--features").arg("\"feat\"") + .arg("--bin=a"), + execs().with_status(101)); + assert_that(p.cargo("build").arg("--features").arg("\"feat\"") + .arg("--example=b"), + execs().with_status(101)); + assert_that(p.cargo("test").arg("--features").arg("\"feat\""), + execs().with_status(101)); + assert_that(p.cargo("bench").arg("--features").arg("\"feat\""), + execs().with_status(101)); +} + + + +#[test] fn target_rustflags_string_and_array_form1() { let p1 = project("foo") .file("Cargo.toml", r#" diff -Nru cargo-0.17.0/tests/search.rs cargo-0.19.0/tests/search.rs --- cargo-0.17.0/tests/search.rs 2017-03-03 19:21:26.000000000 +0000 +++ cargo-0.19.0/tests/search.rs 2017-05-16 03:23:10.000000000 +0000 @@ -87,7 +87,7 @@ .with_stderr("\ [UPDATING] registry `[..]`") .with_stdout("\ -hoare (0.1.1) Design by contract style 
assertions for Rust")); +hoare = \"0.1.1\" # Design by contract style assertions for Rust")); } #[test] @@ -139,7 +139,7 @@ .with_stderr("\ [UPDATING] registry `[..]`") .with_stdout("\ -hoare (0.1.1) Design by contract style assertions for Rust")); +hoare = \"0.1.1\" # Design by contract style assertions for Rust")); } #[test] diff -Nru cargo-0.17.0/tests/test.rs cargo-0.19.0/tests/test.rs --- cargo-0.17.0/tests/test.rs 2017-03-03 19:21:26.000000000 +0000 +++ cargo-0.19.0/tests/test.rs 2017-05-16 03:23:10.000000000 +0000 @@ -7,7 +7,7 @@ use std::str; use cargotest::{sleep_ms, is_nightly}; -use cargotest::support::{project, execs, basic_bin_manifest, basic_lib_manifest}; +use cargotest::support::{project, execs, basic_bin_manifest, basic_lib_manifest, cargo_exe}; use cargotest::support::paths::CargoPathExt; use cargotest::support::registry::Package; use hamcrest::{assert_that, existing_file, is_not}; @@ -118,6 +118,41 @@ } #[test] +fn cargo_test_overflow_checks() { + if !is_nightly() { + return; + } + let p = project("foo") + .file("Cargo.toml", r#" + [package] + name = "foo" + version = "0.5.0" + authors = [] + + [[bin]] + name = "foo" + + [profile.release] + overflow-checks = true + "#) + .file("src/foo.rs", r#" + use std::panic; + pub fn main() { + let r = panic::catch_unwind(|| { + [1, i32::max_value()].iter().sum::(); + }); + assert!(r.is_err()); + }"#); + + assert_that(p.cargo_process("build").arg("--release"), + execs().with_status(0)); + assert_that(&p.release_bin("foo"), existing_file()); + + assert_that(process(&p.release_bin("foo")), + execs().with_status(0).with_stdout("")); +} + +#[test] fn cargo_test_verbose() { let p = project("foo") .file("Cargo.toml", &basic_bin_manifest("foo")) @@ -172,7 +207,7 @@ } #[test] -fn cargo_test_failing_test() { +fn cargo_test_failing_test_in_bin() { let p = project("foo") .file("Cargo.toml", &basic_bin_manifest("foo")) .file("src/foo.rs", r#" @@ -200,7 +235,7 @@ [COMPILING] foo v0.5.0 ({url}) [FINISHED] dev [unoptimized 
+ debuginfo] target(s) in [..] [RUNNING] target[/]debug[/]deps[/]foo-[..][EXE] -[ERROR] test failed", url = p.url())) +[ERROR] test failed, to rerun pass '--bin foo'", url = p.url())) .with_stdout_contains(" running 1 test test test_hello ... FAILED @@ -222,6 +257,93 @@ } #[test] +fn cargo_test_failing_test_in_test() { + let p = project("foo") + .file("Cargo.toml", &basic_bin_manifest("foo")) + .file("src/foo.rs", r#" + pub fn main() { + println!("hello"); + }"#) + .file("tests/footest.rs", r#" + #[test] + fn test_hello() { + assert!(false) + }"#); + + assert_that(p.cargo_process("build"), execs().with_status(0)); + assert_that(&p.bin("foo"), existing_file()); + + assert_that(process(&p.bin("foo")), + execs().with_status(0).with_stdout("hello\n")); + + assert_that(p.cargo("test"), + execs().with_stderr(format!("\ +[COMPILING] foo v0.5.0 ({url}) +[FINISHED] dev [unoptimized + debuginfo] target(s) in [..] +[RUNNING] target[/]debug[/]deps[/]foo-[..][EXE] +[RUNNING] target[/]debug[/]deps[/]footest-[..][EXE] +[ERROR] test failed, to rerun pass '--test footest'", url = p.url())) + .with_stdout_contains(" +running 0 tests + +test result: ok. 0 passed; 0 failed; 0 ignored; 0 measured + + +running 1 test +test test_hello ... FAILED + +failures: + +---- test_hello stdout ---- +thread 'test_hello' panicked at 'assertion failed: false', \ + tests[/]footest.rs:4 +") + .with_stdout_contains("\ +failures: + test_hello + +test result: FAILED. 0 passed; 1 failed; 0 ignored; 0 measured +") + .with_status(101)); +} + +#[test] +fn cargo_test_failing_test_in_lib() { + let p = project("foo") + .file("Cargo.toml", &basic_lib_manifest("foo")) + .file("src/lib.rs", r#" + #[test] + fn test_hello() { + assert!(false) + }"#); + + assert_that(p.cargo_process("test"), + execs().with_stderr(format!("\ +[COMPILING] foo v0.5.0 ({url}) +[FINISHED] dev [unoptimized + debuginfo] target(s) in [..] 
+[RUNNING] target[/]debug[/]deps[/]foo-[..][EXE] +[ERROR] test failed, to rerun pass '--lib'", url = p.url())) + .with_stdout_contains(" +running 1 test +test test_hello ... FAILED + +failures: + +---- test_hello stdout ---- +thread 'test_hello' panicked at 'assertion failed: false', \ + src[/]lib.rs:4 +") + .with_stdout_contains("\ +failures: + test_hello + +test result: FAILED. 0 passed; 1 failed; 0 ignored; 0 measured +") + .with_status(101)); +} + + +#[test] fn test_with_lib_dep() { let p = project("foo") .file("Cargo.toml", r#" @@ -259,18 +381,18 @@ execs().with_status(0).with_stderr(format!("\ [COMPILING] foo v0.0.1 ({}) [FINISHED] dev [unoptimized + debuginfo] target(s) in [..] -[RUNNING] target[/]debug[/]deps[/]baz-[..][EXE] [RUNNING] target[/]debug[/]deps[/]foo-[..][EXE] +[RUNNING] target[/]debug[/]deps[/]baz-[..][EXE] [DOCTEST] foo", p.url())) .with_stdout(" running 1 test -test bin_test ... ok +test lib_test ... ok test result: ok. 1 passed; 0 failed; 0 ignored; 0 measured running 1 test -test lib_test ... ok +test bin_test ... ok test result: ok. 1 passed; 0 failed; 0 ignored; 0 measured @@ -424,18 +546,18 @@ execs().with_status(0).with_stderr(format!("\ [COMPILING] foo v0.0.1 ({}) [FINISHED] dev [unoptimized + debuginfo] target(s) in [..] -[RUNNING] target[/]debug[/]deps[/]external-[..][EXE] [RUNNING] target[/]debug[/]deps[/]foo-[..][EXE] +[RUNNING] target[/]debug[/]deps[/]external-[..][EXE] [DOCTEST] foo", p.url())) .with_stdout(" running 1 test -test external_test ... ok +test internal_test ... ok test result: ok. 1 passed; 0 failed; 0 ignored; 0 measured running 1 test -test internal_test ... ok +test external_test ... ok test result: ok. 
1 passed; 0 failed; 0 ignored; 0 measured @@ -1625,12 +1747,13 @@ authors = [] "#) .file("src/lib.rs", ""); + p.build(); - assert_that(p.cargo_process("run").arg("--example").arg("foo"), + assert_that(p.cargo("run").arg("--example").arg("foo"), execs().with_status(101).with_stderr("\ [ERROR] no example target named `foo` ")); - assert_that(p.cargo_process("run").arg("--bin").arg("foo"), + assert_that(p.cargo("run").arg("--bin").arg("foo"), execs().with_status(101).with_stderr("\ [ERROR] no bin target named `foo` ")); @@ -2169,9 +2292,6 @@ #[test] fn test_panic_abort_with_dep() { - if !is_nightly() { - return - } let p = project("foo") .file("Cargo.toml", r#" [package] @@ -2204,9 +2324,6 @@ #[test] fn cfg_test_even_with_no_harness() { - if !is_nightly() { - return - } let p = project("foo") .file("Cargo.toml", r#" [package] @@ -2298,36 +2415,51 @@ authors = [] [features] - default = ["serde_codegen"] - nightly = ["serde_derive"] + default = ["mock_serde_codegen"] + nightly = ["mock_serde_derive"] [dependencies] - serde_derive = { version = "0.8", optional = true } + mock_serde_derive = { path = "../mock_serde_derive", optional = true } [build-dependencies] - serde_codegen = { version = "0.8", optional = true } + mock_serde_codegen = { path = "../mock_serde_codegen", optional = true } "#) .file("libs/feature_a/src/lib.rs", r#" - #[cfg(feature = "serde_derive")] + #[cfg(feature = "mock_serde_derive")] const MSG: &'static str = "This is safe"; - #[cfg(feature = "serde_codegen")] + #[cfg(feature = "mock_serde_codegen")] const MSG: &'static str = "This is risky"; pub fn get() -> &'static str { MSG } - "#); + "#) + .file("libs/mock_serde_derive/Cargo.toml", r#" + [package] + name = "mock_serde_derive" + version = "0.1.0" + authors = [] + "#) + .file("libs/mock_serde_derive/src/lib.rs", "") + .file("libs/mock_serde_codegen/Cargo.toml", r#" + [package] + name = "mock_serde_codegen" + version = "0.1.0" + authors = [] + "#) + .file("libs/mock_serde_codegen/src/lib.rs", ""); + 
p.build(); - assert_that(p.cargo_process("test") + assert_that(p.cargo("test") .arg("--package").arg("feature_a") .arg("--verbose"), execs().with_status(0) .with_stderr_contains("\ [DOCTEST] feature_a -[RUNNING] `rustdoc --test [..]serde_codegen[..]`")); +[RUNNING] `rustdoc --test [..]mock_serde_codegen[..]`")); - assert_that(p.cargo_process("test") + assert_that(p.cargo("test") .arg("--verbose"), execs().with_status(0) .with_stderr_contains("\ @@ -2392,9 +2524,8 @@ authors = [] "#) .file("a/src/lib.rs", ""); - p.build(); - assert_that(p.cargo("test").arg("-v") + assert_that(p.cargo_process("test").arg("-v") .arg("-p").arg("a") .arg("-p").arg("foo") .arg("--features").arg("foo"), @@ -2427,7 +2558,6 @@ #[test] fn bar_test() {} "#); - p.build(); assert_that(p.cargo_process("test") .arg("--all"), @@ -2472,7 +2602,6 @@ #[test] fn b() {} "#); - p.build(); assert_that(p.cargo_process("test") .arg("--all"), @@ -2511,7 +2640,6 @@ #[test] fn a() {} "#); - p.build(); Package::new("a", "0.1.0").publish(); @@ -2559,6 +2687,61 @@ } #[test] +fn test_many_targets() { + let p = project("foo") + .file("Cargo.toml", r#" + [project] + name = "foo" + version = "0.1.0" + "#) + .file("src/bin/a.rs", r#" + fn main() {} + #[test] fn bin_a() {} + "#) + .file("src/bin/b.rs", r#" + fn main() {} + #[test] fn bin_b() {} + "#) + .file("src/bin/c.rs", r#" + fn main() {} + #[test] fn bin_c() { panic!(); } + "#) + .file("examples/a.rs", r#" + fn main() {} + #[test] fn example_a() {} + "#) + .file("examples/b.rs", r#" + fn main() {} + #[test] fn example_b() {} + "#) + .file("examples/c.rs", r#" + #[test] fn example_c() { panic!(); } + "#) + .file("tests/a.rs", r#" + #[test] fn test_a() {} + "#) + .file("tests/b.rs", r#" + #[test] fn test_b() {} + "#) + .file("tests/c.rs", r#" + does not compile + "#); + + assert_that(p.cargo_process("test").arg("--verbose") + .arg("--bin").arg("a").arg("--bin").arg("b") + .arg("--example").arg("a").arg("--example").arg("b") + 
.arg("--test").arg("a").arg("--test").arg("b"), + execs() + .with_status(0) + .with_stdout_contains("test bin_a ... ok") + .with_stdout_contains("test bin_b ... ok") + .with_stdout_contains("test test_a ... ok") + .with_stdout_contains("test test_b ... ok") + .with_stderr_contains("[RUNNING] `rustc --crate-name a examples[/]a.rs [..]`") + .with_stderr_contains("[RUNNING] `rustc --crate-name b examples[/]b.rs [..]`")) +} + +#[test] fn doctest_and_registry() { let p = project("workspace") .file("Cargo.toml", r#" @@ -2599,3 +2782,72 @@ assert_that(p.cargo_process("test").arg("--all").arg("-v"), execs().with_status(0)); } + +#[test] +fn cargo_test_env() { + let src = format!(r#" + #![crate_type = "rlib"] + + #[test] + fn env_test() {{ + use std::env; + println!("{{}}", env::var("{}").unwrap()); + }} + "#, cargo::CARGO_ENV); + + let p = project("env_test") + .file("Cargo.toml", &basic_lib_manifest("env_test")) + .file("src/lib.rs", &src); + + let mut pr = p.cargo_process("test"); + let cargo = cargo_exe().canonicalize().unwrap(); + assert_that(pr.args(&["--lib", "--", "--nocapture"]), + execs().with_status(0).with_stdout(format!(" +running 1 test +{} +test env_test ... ok + +test result: ok. 1 passed; 0 failed; 0 ignored; 0 measured + +", cargo.to_str().unwrap()))); +} + +#[test] +fn test_order() { + let p = project("foo") + .file("Cargo.toml", r#" + [project] + name = "foo" + version = "0.1.0" + "#) + .file("src/lib.rs", r#" + #[test] fn test_lib() {} + "#) + .file("tests/a.rs", r#" + #[test] fn test_a() {} + "#) + .file("tests/z.rs", r#" + #[test] fn test_z() {} + "#); + + assert_that(p.cargo_process("test").arg("--all"), + execs().with_status(0).with_stdout_contains("\ +running 1 test +test test_lib ... ok + +test result: ok. 1 passed; 0 failed; 0 ignored; 0 measured + + +running 1 test +test test_a ... ok + +test result: ok. 1 passed; 0 failed; 0 ignored; 0 measured + + +running 1 test +test test_z ... ok + +test result: ok. 
1 passed; 0 failed; 0 ignored; 0 measured +")); + +} diff -Nru cargo-0.17.0/tests/version.rs cargo-0.19.0/tests/version.rs --- cargo-0.17.0/tests/version.rs 2017-03-03 19:21:26.000000000 +0000 +++ cargo-0.19.0/tests/version.rs 2017-05-16 03:23:10.000000000 +0000 @@ -9,12 +9,13 @@ #[test] fn simple() { let p = project("foo"); + p.build(); - assert_that(p.cargo_process("version"), + assert_that(p.cargo("version"), execs().with_status(0).with_stdout(&format!("{}\n", cargo::version()))); - assert_that(p.cargo_process("--version"), + assert_that(p.cargo("--version"), execs().with_status(0).with_stdout(&format!("{}\n", cargo::version()))); diff -Nru cargo-0.17.0/tests/warn-on-failure.rs cargo-0.19.0/tests/warn-on-failure.rs --- cargo-0.17.0/tests/warn-on-failure.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/tests/warn-on-failure.rs 2017-05-16 03:23:10.000000000 +0000 @@ -0,0 +1,92 @@ +extern crate cargotest; +extern crate hamcrest; + +use cargotest::support::{project, execs, ProjectBuilder}; +use cargotest::support::registry::Package; +use hamcrest::assert_that; + +static WARNING1: &'static str = "Hello! I'm a warning. 
:)"; +static WARNING2: &'static str = "And one more!"; + +fn make_lib(lib_src: &str) { + Package::new("foo", "0.0.1") + .file("Cargo.toml", r#" + [package] + name = "foo" + authors = [] + version = "0.0.1" + build = "build.rs" + "#) + .file("build.rs", &format!(r#" + fn main() {{ + use std::io::Write; + println!("cargo:warning={{}}", "{}"); + println!("hidden stdout"); + write!(&mut ::std::io::stderr(), "hidden stderr"); + println!("cargo:warning={{}}", "{}"); + }} + "#, WARNING1, WARNING2)) + .file("src/lib.rs", &format!("fn f() {{ {} }}", lib_src)) + .publish(); +} + +fn make_upstream(main_src: &str) -> ProjectBuilder { + project("bar") + .file("Cargo.toml", r#" + [package] + name = "bar" + version = "0.0.1" + authors = [] + + [dependencies] + foo = "*" + "#) + .file("src/main.rs", &format!("fn main() {{ {} }}", main_src)) +} + +#[test] +fn no_warning_on_success() { + make_lib(""); + let upstream = make_upstream(""); + assert_that(upstream.cargo_process("build"), + execs().with_status(0) + .with_stderr("\ +[UPDATING] registry `[..]` +[DOWNLOADING] foo v0.0.1 ([..]) +[COMPILING] foo v0.0.1 +[COMPILING] bar v0.0.1 ([..]) +[FINISHED] dev [unoptimized + debuginfo] target(s) in [..] 
+")); +} + +#[test] +fn no_warning_on_bin_failure() { + make_lib(""); + let upstream = make_upstream("hi()"); + assert_that(upstream.cargo_process("build"), + execs().with_status(101) + .with_stdout_does_not_contain("hidden stdout") + .with_stderr_does_not_contain("hidden stderr") + .with_stderr_does_not_contain(&format!("[WARNING] {}", WARNING1)) + .with_stderr_does_not_contain(&format!("[WARNING] {}", WARNING2)) + .with_stderr_contains("[UPDATING] registry `[..]`") + .with_stderr_contains("[DOWNLOADING] foo v0.0.1 ([..])") + .with_stderr_contains("[COMPILING] foo v0.0.1") + .with_stderr_contains("[COMPILING] bar v0.0.1 ([..])")); +} + +#[test] +fn warning_on_lib_failure() { + make_lib("err()"); + let upstream = make_upstream(""); + assert_that(upstream.cargo_process("build"), + execs().with_status(101) + .with_stdout_does_not_contain("hidden stdout") + .with_stderr_does_not_contain("hidden stderr") + .with_stderr_does_not_contain("[COMPILING] bar v0.0.1 ([..])") + .with_stderr_contains("[UPDATING] registry `[..]`") + .with_stderr_contains("[DOWNLOADING] foo v0.0.1 ([..])") + .with_stderr_contains("[COMPILING] foo v0.0.1") + .with_stderr_contains(&format!("[WARNING] {}", WARNING1)) + .with_stderr_contains(&format!("[WARNING] {}", WARNING2))); +} diff -Nru cargo-0.17.0/tests/workspaces.rs cargo-0.19.0/tests/workspaces.rs --- cargo-0.17.0/tests/workspaces.rs 2017-03-03 19:21:26.000000000 +0000 +++ cargo-0.19.0/tests/workspaces.rs 2017-05-16 03:23:10.000000000 +0000 @@ -1103,6 +1103,39 @@ } #[test] +fn relative_path_for_root_works() { + let p = project("foo") + .file("Cargo.toml", r#" + [project] + name = "foo" + version = "0.1.0" + authors = [] + + [workspace] + + [dependencies] + subproj = { path = "./subproj" } + "#) + .file("src/main.rs", "fn main() {}") + .file("subproj/Cargo.toml", r#" + [project] + name = "subproj" + version = "0.1.0" + authors = [] + "#) + .file("subproj/src/main.rs", "fn main() {}"); + p.build(); + + 
assert_that(p.cargo("build").cwd(p.root()) + .arg("--manifest-path").arg("./Cargo.toml"), + execs().with_status(0)); + + assert_that(p.cargo("build").cwd(p.root().join("subproj")) + .arg("--manifest-path").arg("../Cargo.toml"), + execs().with_status(0)); +} + +#[test] fn path_dep_outside_workspace_is_not_member() { let p = project("foo") .file("ws/Cargo.toml", r#" @@ -1227,8 +1260,121 @@ assert_that(p.cargo("build").cwd(p.root().join("foo/bar")), execs().with_status(0)); - // Ideally, `foo/bar` should be a member of the workspace, - // because it is hierarchically under the workspace member. - assert_that(&p.root().join("foo/bar/Cargo.lock"), existing_file()); + + assert_that(&p.root().join("foo/bar/Cargo.lock"), is_not(existing_file())); + assert_that(&p.root().join("foo/bar/target"), is_not(existing_dir())); +} + +#[test] +fn excluded_simple() { + let p = project("foo") + .file("Cargo.toml", r#" + [project] + name = "ws" + version = "0.1.0" + authors = [] + + [workspace] + exclude = ["foo"] + "#) + .file("src/lib.rs", "") + .file("foo/Cargo.toml", r#" + [project] + name = "foo" + version = "0.1.0" + authors = [] + "#) + .file("foo/src/lib.rs", ""); + p.build(); + + assert_that(p.cargo("build"), + execs().with_status(0)); + assert_that(&p.root().join("target"), existing_dir()); + assert_that(p.cargo("build").cwd(p.root().join("foo")), + execs().with_status(0)); + assert_that(&p.root().join("foo/target"), existing_dir()); +} + +#[test] +fn exclude_members_preferred() { + let p = project("foo") + .file("Cargo.toml", r#" + [project] + name = "ws" + version = "0.1.0" + authors = [] + + [workspace] + members = ["foo/bar"] + exclude = ["foo"] + "#) + .file("src/lib.rs", "") + .file("foo/Cargo.toml", r#" + [project] + name = "foo" + version = "0.1.0" + authors = [] + "#) + .file("foo/src/lib.rs", "") + .file("foo/bar/Cargo.toml", r#" + [project] + name = "bar" + version = "0.1.0" + authors = [] + "#) + .file("foo/bar/src/lib.rs", ""); + p.build(); + + 
assert_that(p.cargo("build"), + execs().with_status(0)); + assert_that(&p.root().join("target"), existing_dir()); + assert_that(p.cargo("build").cwd(p.root().join("foo")), + execs().with_status(0)); + assert_that(&p.root().join("foo/target"), existing_dir()); + assert_that(p.cargo("build").cwd(p.root().join("foo/bar")), + execs().with_status(0)); + assert_that(&p.root().join("foo/bar/target"), is_not(existing_dir())); +} + +#[test] +fn exclude_but_also_depend() { + let p = project("foo") + .file("Cargo.toml", r#" + [project] + name = "ws" + version = "0.1.0" + authors = [] + + [dependencies] + bar = { path = "foo/bar" } + + [workspace] + exclude = ["foo"] + "#) + .file("src/lib.rs", "") + .file("foo/Cargo.toml", r#" + [project] + name = "foo" + version = "0.1.0" + authors = [] + "#) + .file("foo/src/lib.rs", "") + .file("foo/bar/Cargo.toml", r#" + [project] + name = "bar" + version = "0.1.0" + authors = [] + "#) + .file("foo/bar/src/lib.rs", ""); + p.build(); + + assert_that(p.cargo("build"), + execs().with_status(0)); + assert_that(&p.root().join("target"), existing_dir()); + assert_that(p.cargo("build").cwd(p.root().join("foo")), + execs().with_status(0)); + assert_that(&p.root().join("foo/target"), existing_dir()); + assert_that(p.cargo("build").cwd(p.root().join("foo/bar")), + execs().with_status(0)); assert_that(&p.root().join("foo/bar/target"), existing_dir()); } diff -Nru cargo-0.17.0/.travis.yml cargo-0.19.0/.travis.yml --- cargo-0.17.0/.travis.yml 2017-03-03 19:21:26.000000000 +0000 +++ cargo-0.19.0/.travis.yml 2017-05-16 03:23:10.000000000 +0000 @@ -17,6 +17,7 @@ IMAGE=dist MAKE_TARGETS="test distcheck doc install uninstall" ALLOW_PR=1 + NO_ADD=1 - env: TARGET=i686-unknown-linux-gnu IMAGE=dist MAKE_TARGETS=test-unit-i686-unknown-linux-gnu @@ -27,6 +28,7 @@ ALT=i686-apple-darwin MAKE_TARGETS="test distcheck doc install uninstall" MACOSX_DEPLOYMENT_TARGET=10.7 + NO_ADD=1 os: osx - env: TARGET=i686-apple-darwin MAKE_TARGETS=test-unit-i686-apple-darwin @@ 
-41,6 +43,8 @@ MAKE_TARGETS=test-unit-$TARGET # cross compiled targets + - env: TARGET=armv7-linux-androideabi + IMAGE=android - env: TARGET=arm-unknown-linux-gnueabi IMAGE=cross - env: TARGET=arm-unknown-linux-gnueabihf @@ -84,24 +88,22 @@ IMAGE=dist MAKE_TARGETS="test distcheck doc install uninstall" DEPLOY=0 + NO_ADD=1 rust: beta - env: TARGET=x86_64-unknown-linux-gnu ALT=i686-unknown-linux-gnu IMAGE=dist MAKE_TARGETS="test distcheck doc install uninstall" DEPLOY=0 + NO_ADD=1 rust: nightly exclude: - rust: stable before_script: - - curl https://static.rust-lang.org/rustup.sh | - sh -s -- --add-target=$TARGET --disable-sudo -y --prefix=`rustc --print sysroot` - - if [ ! -z "$ALT" ]; then - curl https://static.rust-lang.org/rustup.sh | - sh -s -- --add-target=$ALT --disable-sudo -y --prefix=`rustc --print sysroot`; - fi + - if [ -z "$NO_ADD" ]; then rustup target add $TARGET; fi + - if [ ! -z "$ALT" ]; then rustup target add $ALT; fi script: - > if [ "$ALLOW_PR" = "" ] && [ "$TRAVIS_PULL_REQUEST" != "false" ]; then @@ -154,3 +156,7 @@ on: branch: auto-cargo condition: $DEPLOY = 1 + +cache: + directories: + - target/openssl diff -Nru cargo-0.17.0/vendor/aho-corasick-0.6.3/benches/bench.rs cargo-0.19.0/vendor/aho-corasick-0.6.3/benches/bench.rs --- cargo-0.17.0/vendor/aho-corasick-0.6.3/benches/bench.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/aho-corasick-0.6.3/benches/bench.rs 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,339 @@ +#![feature(test)] + +extern crate aho_corasick; +extern crate test; + +use std::iter; + +use aho_corasick::{Automaton, AcAutomaton, Transitions}; +use test::Bencher; + +const HAYSTACK_RANDOM: &'static str = include_str!("random.txt"); +const HAYSTACK_SHERLOCK: &'static str = include_str!("sherlock.txt"); + +fn bench_aut_no_match, T: Transitions>( + b: &mut Bencher, + aut: AcAutomaton, + haystack: &str, +) { + b.bytes = haystack.len() as u64; + b.iter(|| assert!(aut.find(haystack).next().is_none())); +} + +fn 
bench_box_aut_no_match, T: Transitions>( + b: &mut Bencher, + aut: AcAutomaton, + haystack: &str, +) { + b.bytes = haystack.len() as u64; + let aut: &Automaton

= &aut; + b.iter(|| assert!(Automaton::find(&aut, haystack).next().is_none())); +} + +fn bench_full_aut_no_match, T: Transitions>( + b: &mut Bencher, + aut: AcAutomaton, + haystack: &str, +) { + let aut = aut.into_full(); + b.bytes = haystack.len() as u64; + b.iter(|| assert!(aut.find(haystack).next().is_none())); +} + +fn bench_full_aut_overlapping_no_match, T: Transitions>( + b: &mut Bencher, + aut: AcAutomaton, + haystack: &str, +) { + let aut = aut.into_full(); + b.bytes = haystack.len() as u64; + b.iter(|| assert!(aut.find_overlapping(haystack).count() == 0)); +} + +fn bench_naive_no_match(b: &mut Bencher, needles: Vec, haystack: &str) + where S: Into { + b.bytes = haystack.len() as u64; + let needles: Vec = needles.into_iter().map(Into::into).collect(); + b.iter(|| assert!(!naive_find(&needles, haystack))); +} + +fn haystack_same(letter: char) -> String { + iter::repeat(letter).take(10000).collect() +} + +macro_rules! aut_benches { + ($prefix:ident, $aut:expr, $bench:expr) => { + mod $prefix { +#![allow(unused_imports)] +use aho_corasick::{Automaton, AcAutomaton, Sparse}; +use test::Bencher; + +use super::{ + HAYSTACK_RANDOM, haystack_same, + bench_aut_no_match, bench_box_aut_no_match, + bench_full_aut_no_match, bench_full_aut_overlapping_no_match, +}; + +#[bench] +fn ac_one_byte(b: &mut Bencher) { + let aut = $aut(vec!["a"]); + $bench(b, aut, &haystack_same('z')); +} + +#[bench] +fn ac_one_prefix_byte_no_match(b: &mut Bencher) { + let aut = $aut(vec!["zbc"]); + $bench(b, aut, &haystack_same('y')); +} + +#[bench] +fn ac_one_prefix_byte_every_match(b: &mut Bencher) { + // We lose the benefit of `memchr` because the first byte matches + // in every position in the haystack. 
+ let aut = $aut(vec!["zbc"]); + $bench(b, aut, &haystack_same('z')); +} + +#[bench] +fn ac_one_prefix_byte_random(b: &mut Bencher) { + let aut = $aut(vec!["zbc\x00"]); + $bench(b, aut, HAYSTACK_RANDOM); +} + +#[bench] +fn ac_two_bytes(b: &mut Bencher) { + let aut = $aut(vec!["a", "b"]); + $bench(b, aut, &haystack_same('z')); +} + +#[bench] +fn ac_two_diff_prefix(b: &mut Bencher) { + let aut = $aut(vec!["abcdef", "bmnopq"]); + $bench(b, aut, &haystack_same('z')); +} + +#[bench] +fn ac_two_one_prefix_byte_every_match(b: &mut Bencher) { + let aut = $aut(vec!["zbcdef", "zmnopq"]); + $bench(b, aut, &haystack_same('z')); +} + +#[bench] +fn ac_two_one_prefix_byte_no_match(b: &mut Bencher) { + let aut = $aut(vec!["zbcdef", "zmnopq"]); + $bench(b, aut, &haystack_same('y')); +} + +#[bench] +fn ac_two_one_prefix_byte_random(b: &mut Bencher) { + let aut = $aut(vec!["zbcdef\x00", "zmnopq\x00"]); + $bench(b, aut, HAYSTACK_RANDOM); +} + +#[bench] +fn ac_ten_bytes(b: &mut Bencher) { + let aut = $aut(vec!["a", "b", "c", "d", "e", + "f", "g", "h", "i", "j"]); + $bench(b, aut, &haystack_same('z')); +} + +#[bench] +fn ac_ten_diff_prefix(b: &mut Bencher) { + let aut = $aut(vec!["abcdef", "bbcdef", "cbcdef", "dbcdef", + "ebcdef", "fbcdef", "gbcdef", "hbcdef", + "ibcdef", "jbcdef"]); + $bench(b, aut, &haystack_same('z')); +} + +#[bench] +fn ac_ten_one_prefix_byte_every_match(b: &mut Bencher) { + let aut = $aut(vec!["zacdef", "zbcdef", "zccdef", "zdcdef", + "zecdef", "zfcdef", "zgcdef", "zhcdef", + "zicdef", "zjcdef"]); + $bench(b, aut, &haystack_same('z')); +} + +#[bench] +fn ac_ten_one_prefix_byte_no_match(b: &mut Bencher) { + let aut = $aut(vec!["zacdef", "zbcdef", "zccdef", "zdcdef", + "zecdef", "zfcdef", "zgcdef", "zhcdef", + "zicdef", "zjcdef"]); + $bench(b, aut, &haystack_same('y')); +} + +#[bench] +fn ac_ten_one_prefix_byte_random(b: &mut Bencher) { + let aut = $aut(vec!["zacdef\x00", "zbcdef\x00", "zccdef\x00", + "zdcdef\x00", "zecdef\x00", "zfcdef\x00", + "zgcdef\x00", 
"zhcdef\x00", "zicdef\x00", + "zjcdef\x00"]); + $bench(b, aut, HAYSTACK_RANDOM); +} + } + } +} + +aut_benches!(dense, AcAutomaton::new, bench_aut_no_match); +aut_benches!(dense_boxed, AcAutomaton::new, bench_box_aut_no_match); +aut_benches!(sparse, AcAutomaton::<&str, Sparse>::with_transitions, + bench_aut_no_match); +aut_benches!(full, AcAutomaton::new, bench_full_aut_no_match); +aut_benches!(full_overlap, AcAutomaton::new, bench_full_aut_overlapping_no_match); + +// A naive multi-pattern search. +// We use this to benchmark *throughput*, so it should never match anything. +fn naive_find(needles: &[String], haystack: &str) -> bool { + for hi in 0..haystack.len() { + let rest = &haystack.as_bytes()[hi..]; + for needle in needles { + let needle = needle.as_bytes(); + if needle.len() > rest.len() { + continue; + } + if needle == &rest[..needle.len()] { + // should never happen in throughput benchmarks. + return true; + } + } + } + false +} + +#[bench] +fn naive_one_byte(b: &mut Bencher) { + bench_naive_no_match(b, vec!["a"], &haystack_same('z')); +} + +#[bench] +fn naive_one_prefix_byte_no_match(b: &mut Bencher) { + bench_naive_no_match(b, vec!["zbc"], &haystack_same('y')); +} + +#[bench] +fn naive_one_prefix_byte_every_match(b: &mut Bencher) { + bench_naive_no_match(b, vec!["zbc"], &haystack_same('z')); +} + +#[bench] +fn naive_one_prefix_byte_random(b: &mut Bencher) { + bench_naive_no_match(b, vec!["zbc\x00"], HAYSTACK_RANDOM); +} + +#[bench] +fn naive_two_bytes(b: &mut Bencher) { + bench_naive_no_match(b, vec!["a", "b"], &haystack_same('z')); +} + +#[bench] +fn naive_two_diff_prefix(b: &mut Bencher) { + bench_naive_no_match(b, vec!["abcdef", "bmnopq"], &haystack_same('z')); +} + +#[bench] +fn naive_two_one_prefix_byte_every_match(b: &mut Bencher) { + bench_naive_no_match(b, vec!["zbcdef", "zmnopq"], &haystack_same('z')); +} + +#[bench] +fn naive_two_one_prefix_byte_no_match(b: &mut Bencher) { + bench_naive_no_match(b, vec!["zbcdef", "zmnopq"], 
&haystack_same('y')); +} + +#[bench] +fn naive_two_one_prefix_byte_random(b: &mut Bencher) { + bench_naive_no_match(b, vec!["zbcdef\x00", "zmnopq\x00"], HAYSTACK_RANDOM); +} + +#[bench] +fn naive_ten_bytes(b: &mut Bencher) { + let needles = vec!["a", "b", "c", "d", "e", + "f", "g", "h", "i", "j"]; + bench_naive_no_match(b, needles, &haystack_same('z')); +} + +#[bench] +fn naive_ten_diff_prefix(b: &mut Bencher) { + let needles = vec!["abcdef", "bbcdef", "cbcdef", "dbcdef", + "ebcdef", "fbcdef", "gbcdef", "hbcdef", + "ibcdef", "jbcdef"]; + bench_naive_no_match(b, needles, &haystack_same('z')); +} + +#[bench] +fn naive_ten_one_prefix_byte_every_match(b: &mut Bencher) { + let needles = vec!["zacdef", "zbcdef", "zccdef", "zdcdef", + "zecdef", "zfcdef", "zgcdef", "zhcdef", + "zicdef", "zjcdef"]; + bench_naive_no_match(b, needles, &haystack_same('z')); +} + +#[bench] +fn naive_ten_one_prefix_byte_no_match(b: &mut Bencher) { + let needles = vec!["zacdef", "zbcdef", "zccdef", "zdcdef", + "zecdef", "zfcdef", "zgcdef", "zhcdef", + "zicdef", "zjcdef"]; + bench_naive_no_match(b, needles, &haystack_same('y')); +} + +#[bench] +fn naive_ten_one_prefix_byte_random(b: &mut Bencher) { + let needles = vec!["zacdef\x00", "zbcdef\x00", "zccdef\x00", + "zdcdef\x00", "zecdef\x00", "zfcdef\x00", + "zgcdef\x00", "zhcdef\x00", "zicdef\x00", + "zjcdef\x00"]; + bench_naive_no_match(b, needles, HAYSTACK_RANDOM); +} + + +// The organization above is just awful. Let's start over... + +mod sherlock { + use aho_corasick::{Automaton, AcAutomaton}; + use test::Bencher; + use super::HAYSTACK_SHERLOCK; + + macro_rules! 
sherlock { + ($name:ident, $count:expr, $pats:expr) => { + #[bench] + fn $name(b: &mut Bencher) { + let haystack = HAYSTACK_SHERLOCK; + let aut = AcAutomaton::new($pats).into_full(); + b.bytes = haystack.len() as u64; + b.iter(|| assert_eq!($count, aut.find(haystack).count())); + } + } + } + + sherlock!(name_alt1, 158, vec!["Sherlock", "Street"]); + + sherlock!(name_alt2, 558, vec!["Sherlock", "Holmes"]); + + sherlock!(name_alt3, 740, vec![ + "Sherlock", "Holmes", "Watson", "Irene", "Adler", "John", "Baker", + ]); + + sherlock!(name_alt3_nocase, 1764, vec![ + "ADL", "ADl", "AdL", "Adl", "BAK", "BAk", "BAK", "BaK", "Bak", "BaK", + "HOL", "HOl", "HoL", "Hol", "IRE", "IRe", "IrE", "Ire", "JOH", "JOh", + "JoH", "Joh", "SHE", "SHe", "ShE", "She", "WAT", "WAt", "WaT", "Wat", + "aDL", "aDl", "adL", "adl", "bAK", "bAk", "bAK", "baK", "bak", "baK", + "hOL", "hOl", "hoL", "hol", "iRE", "iRe", "irE", "ire", "jOH", "jOh", + "joH", "joh", "sHE", "sHe", "shE", "she", "wAT", "wAt", "waT", "wat", + "ſHE", "ſHe", "ſhE", "ſhe", + ]); + + sherlock!(name_alt4, 582, vec!["Sher", "Hol"]); + + sherlock!(name_alt4_nocase, 1307, vec![ + "HOL", "HOl", "HoL", "Hol", "SHE", "SHe", "ShE", "She", "hOL", "hOl", + "hoL", "hol", "sHE", "sHe", "shE", "she", "ſHE", "ſHe", "ſhE", "ſhe", + ]); + + sherlock!(name_alt5, 639, vec!["Sherlock", "Holmes", "Watson"]); + + sherlock!(name_alt5_nocase, 1442, vec![ + "HOL", "HOl", "HoL", "Hol", "SHE", "SHe", "ShE", "She", "WAT", "WAt", + "WaT", "Wat", "hOL", "hOl", "hoL", "hol", "sHE", "sHe", "shE", "she", + "wAT", "wAt", "waT", "wat", "ſHE", "ſHe", "ſhE", "ſhe", + ]); +} diff -Nru cargo-0.17.0/vendor/aho-corasick-0.6.3/benches/random.txt cargo-0.19.0/vendor/aho-corasick-0.6.3/benches/random.txt --- cargo-0.17.0/vendor/aho-corasick-0.6.3/benches/random.txt 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/aho-corasick-0.6.3/benches/random.txt 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,513 @@ + +mnxnsynfvuugtbxsxbfxwreuspglnplefzwsp 
+tacfqcwnmodnmgnyiuvqoco +z + +qjuozfkexn +zoaxzncje +sldhqtmgxzyurfyzwazmmu +bbeuv +mzsrihycwcb +xzfqozfmlnpmrzpxxxytqs +xrg +mcplby +nmslhfgjowhzfxsvyddydnsyehdskbydbjksqtpet +indvfw +bvjvvw + +pddufodyqtyixbndtumndyz +xjjhtuvmsxhuwqulqtjhqrdqrmtbcphvyuqllocrnkpfv +zemshhz +wss +xewlrxfmgxnwgphcgefa +mbgsgbzrtthxweimcqzcaaheurdmd +osqefupespvh +z +tvvlakwzwjbrgjzfgubsmmonav +pjdskxcfgapsm +zqktqgkrcdrlskx +zwwfebhguskho +zlvvw +czwm +gojnpmboehlsazbexjjnuscqftrfufngygjdxcydib +d +afigycivicnknfxl +ljuwuopctiftfwctxecwipjnljyef +jonwbkodomzhqvlf +jdkizhognqsdogunwedjsmsdzho +zxvni +oynfjf +muvokjuqz +azuwrwtuxzfopwrcex +ixrjinlvxjmn +blaegnmbhsgsbmebwazaeguugtkowexgnqtbfkldadddv +tzabyoftyov +ctbtqbzscxzviuvcigwuwusrdro +ljynr +gnnnyyxslrhsbj +hhzlw +hijalf +rxlfqk +mhaofforwznvmcgplinludpgkucpa +gvvxsqqfmu +xxqhoyosixjfhjuxpv +faadjpvamjekreepizurntvwdynozfawsfawyms + +lcbutr +aqyxvpozkjrecrkl +lfmochahrr +ptqyomjlwo +vcmslulznx +lmlsskcihrmxauztuarydlp +beiqsrfnmvmlmybmwpektjbikvpggthpabqsgmjhnthvysuhwbigillugjsp +dfsuegseffwcsnvsrqedytblbpzbfeyfsq +kypvqctrkuds +ylqeduokzgdqaxelhftxnxbidu +bprzyayfopxdsmfhhfqowa +ymiutdtlfaaxpbtaeslv +ggago + +owpbicekdeykzfgcbgzobdvvrtetvcv +xsrlgingstiez +gyncqvq +xasohmeiwyscpehctmzmsnjklg +xsudghakxlw +dzqlfptjogzpkvwuticcyugnyopypuqqc +wlxshxbhdvuherumoppcc + +znyaptivzncvkpeyeipynqefjxjjcsgfqbnezeebtowdrbjaqjlbxwvyikrmxjwoxngqgvfpbniftnmszuxg +umwpwwyvufy +pallkjtnrmtauqxauewgygwkjjwebbkabhtxticxmxfujpxlrpzlrozfslkzfdsswlmmsbdgjwmjnummk +dhsxylejzityahtqqzmohrpzjprrsraztpnuagtyzfjdekthvdogfidksrdppr +ybc +fyukknoqfnkllkwflwempjijxgo +dltvlau +rhvrvlwsribfctuzodfqkdczfzxnetqqzflnhiyl +goxmcasmq +wljbhwkpahdotqhhrbhqzijv +lszewkgdmkezvgmbmllhpksdkoiwgkvqjmurshrptlctqsosuurndcuzjfwherotv +dudxxihygxblhgchbgzyzffb +eht +fvwxvqoltdcsd +rkuig +e +axhsacsmnicugul +rubtdlhjqndxdzzwfnkuzy +swxteuyxxsktkjgv +hzwwodlqaq +vxgecev +qnwla +vdxjuzpyoqhpmuunyffptopmeauhycs +dkzo +awrfzatzohslgvqlaezepmli +qgxatixvpkkhvkumbwmwcagtgyfljdok 
+amdnzstpvcqj +xsrvwvhjirzfgkessve +qezwbfltfbikbmoasvoflozsjhrljnszqiciuqmflrlqowwkoevuumh +babskcvavmtvsxqsewirucwzajjcfcqwsydydqo +ywfurpsl +edacsjjkjjewkxfoh +dcgkfpcjezurnuhiatrczcp +xsatnimwbcciu +grzmbrsvvcyigcbmcqfwiiknrohveubhyijxeyzfm +kqyewccgcqrrrznwxmoztlyseagbpyho +najju +nis +awgzdvfjkzlrsjcqfeacx +oisuflfigrjaex +desbdulyuwqxuxianyypybxwlql +ekmqgspvqpftpwswayh +egbyj +fznzprhvnnwcxgcc +wfdsueieosmugirxbymbpmfrspvrktjzguxm +qkjrufshwnfwwpbhukdjlaqvljlgubmqmhnha +hwqpudgnblhlxppbrmbznotteivuzguuwlhtkytky +w +yofkyzbpg +cenolnfnllkvhikrpttcxgqxmufvorekjruyjxmr + +hyexmpjijgzumawp +cdbevdilgopbzlo +fivelagckslkugdxprjxkylizewcptwxfhomzuituujixchadmnjoktnqa +csojvlinzmmkkfzqueamnuwkanzdzsavgohposbuoamoevehqrmcxdsuyelvvctoejzoertqormhaaxwofvjzekwt +sbkghhnhutrvwtyjaxndzyjamrhx +jjyqy +majwbnrhveuhrsbbbjrwpwuplifeseylqh +wyvutpxnkrnkuxxetjkkifpqb +dyzucmbcvgnjeecm +hz +uhnuipthxrzkqluosvk +lwqqzsdwiwvwaqfwlvubadlyizlo +jbd +oyzjeu +kydjkbsqxnbfiuesc +smeubjqrcxdvhsabzceyglqjzbfmoacmwvwjbhhxbr +uabipgecujfdfxpmdzrscdyvefizabgspqjrrkmgjt +xgvdgzryz +lw +uimob +ifhn +bqph +ole +g +wt +k +yslzrkwkundxfdibwqvucemepqxlmlpyngabbeciuzhptpjdetyngrtxrdtzmvq +ccwapidp + +bwvrgvmtshevrophy +ni +fdkplu +mdykey +i +rhsrenoetdggpjb +djmkplpeabsholx +judxtub +fooakqwvocvpcrvxqhvtmpvhkrecy +uuxscjillynilbkrgt +evtinrmilniguarqritpeipwochmdw +sxaqzjybydyvnmmjtdcgkjnqfcklbfpkdfyewgcukqoiegyfp +kg +ovrwieqhy +jcxqtkerzjwhs +xeonglszbgypafhmqcaseimzjgebkvigbqwsayrnrprtuvhsxyitfqygohgorcdnufbcyvevvgzmjrgjqqquwkszplogx +zdketqqv +yebckucwayckeezfvtnavglpjh +zorkfrwk +pad +xqaquxudybwtgixbfktinctfirjfdayh +rieknj +ebk +qzbcfywfdmhsdruhopovemafijbscagllkmhmof + +asbsnbddlobwoqatfhkbhhsymzqxjuixwreheugvngmgcuqpkjhhfwpbarqaxrwgwnjbanljlds +etevdvlc +lqyjrnmenhn +k +tsf +zczgeavcexh +jlpuxywtsrvnvluruqhecjca +ir +rikrgkmhwaosodkxgcnrexfmdrszhnmutpvwztg +bffjqovvkemctnsgeh +weysbhzixiipfithjfsk +usyzvaiyuhmksfluoirfbnsu +o +cgawpdakaszeafdtbdkqtlzkrpnoqomqvuaqcfmzgvfegovtfaonelpv 
+izmrcjlk +xmzemniyrzy +knqexaafsdlimdamcrprlshq +qkmqw +dntgjwsibclvposdwjuklvtejjjdjibgpyynqpgprvvaetshhmvfkcpb +otvazkrkklrxfotpopyjte +fghkcnpi +rulyaihsowvcgbzeiblhuhhfbmncqsuuqcxvseorn +exirzfmojnxcoqom +zsgpgtokun +zvamxfocorganbtlafifwdqmqtsnktbwwtewborq + +cxlnaspjqvsitjyzyriqsuorjsrvzqenisprttudxntsbqrpjtdkxnwcwgjyxmgtqljcrmrbrmyvosojzlumcmjcgfjsdehec +mvx +mt +mckr +teulvroifk +laaicc +koufy +bexmwsvyarnznebdfy +ripvviosbqijsxnjilwddaqaqemzsdarnxmfooxghoypizwtbueo +ljycycuqwfnzbambibqdixmkkvwtubepla +cis +kcg +vmbbiuuoamenzepuagpfujevfstqtndjxjchdvycfrrrowochtjdmkklgnhf +pmorrwguxkvdxpluatagaziin + +uwvzbmkmykjkmknzppklx +pnzxuvsrjunqxercsnvayhykcazdeclomdsasgkpqpiufyfqsxhj +yceizkddwojgweegcllaagpvrpo +ek +kuxxgbezqyxvfaxdwnqdgqsmneijunxzlwxkrs +ldldbrxmvtjlqxifngmactzqcygkvuteffcmvphevilabgukatqakamjlridznodcvblvlogulmcixxfimh +iuzjootuywjqklolzzhpeaynydjwtufjavbozxnzckuzdodkvkjfmhinelv +swlfkcufscfcovmghqwcrtxjukwafoeogrkgubbqgwzm +gjcylkwgzroubdssuqeykqjcmguso +fzq +srfvysoxtlylctp + +pbfeiuzwoyixews +ocvvunfsjnrtklmuuzjojw +xdjcnrpqhmpmpcwacpcdtmbsczvhllkqapzjuaf +nfnuvjz +fwnuiyqpn +wshxxxpzzxp +hibrxcfeqca + +wqhlllarl +bukcbojv +plrytapy +xm +vlgfqoyzdczqbbaxjwbjjevjhxgopuqvqcrj +vpjqfbdnsdxlbuuiqocvrhap +mgumjbvnnzgnrdru +gcgzugazxdcamrhczfzhtmdjj +uislwq +vooai +zjuqfmebuzsqngzekyajujkopvayxtdzvugwwucvlsbrnhitfotmhhmgddlzlvqrkcponictrfweuilfjiuoabkfdvpjiqjrrgi +aptjfhmrnxaq +hbs +w +mwmoxqvucwygunplzvxtxpk +fgmqmtlorfzytjdzffsosfccnfwugrsrynuej +rpmpenrhsxoefnblyumjqwvuyszyppnttuyvazjdug +zdzxraxkroknkmqgvuoqeqdtvclsvvuwmdwzfugcpteohlogxubyoebvrzbqzklvehfcqadtdrkpubfhmokzwyosogepwragcpwxo +ax +dz +de + +thvkdmnbdws + +ejmubw +umvwkaubzurf +wyxtxeluaoox +wwbioobtgmkebxo +miglgnafmdarzkeblyjctuayzyoeqnfnbtrcbymdzkzg +loavxq +kzhllgsenxlbgdbfzwbg +yxflogzsohlcycbyzegeubfflouvtuatixhjvicjegltjiy +jigqfjppafdiarc +mcnmwtachgearonfcymvjbrnljjxmlzkudvzqsarnfysmxlfrtlvjxwvpdbhvwysnvcdozfcruhjwnucdzakkilmlfgjiolcatpfusm + +n +pdjunfcz +dc 
+edxkkxabsbvmvifiinnoccki +bc +gwtwsvorwzfqpz +exidmexstfflkhi +s +s +c +wtcjfywlayhpbqktcepoybowtkrmnumqsg +ozclkgjdmdk +jmegtbunyexurvfexhqptnqzie +tkoenpagzwqfawlxvzaijsjqhmg +swodqfjpdqcbkc +ujokogocyaygdibgpglecis +shlmdmgonvpuaxlhrymkxtiytmv +brhk +jmsyiuomiywxhegilycjprkyfgojdo + +wzdzrgpdiosdsvkcw +odlnmsfnjrcsnflviwvawybpczdkzvdocpwrmavz +p +ubowamlskcqhdxuckrxa +fawhntiwhmdwkddnahmtajqqazpdygttqivhdiodkcpcwv +gmxujmmaufmbipaiulhurzkfdg +eixjhmbaeoybiwk +kumntgrgiofcmujlzbcopuobambsw +mnjkqiyb +iktwnsnv +hfuzcl +tqiyqvagbqgtowpjbedgjot +dfemvamelxadkztogliizdtsddoboafawficudlefo +raecmxiiibljryswntpfed +mbwrtsebkeegw +x +epp +he + +vnztrswhiusokqdkmsnpuswucvfhcthjbtam +baxlwidsgbdpzvnlj +tcbjjoadrzo +aiidahyllzzsg + +igebuubweicbssgddpmqxunrawavuglmpxrtkqsvjjtscibqiejjfgfnovokodmqcqitlteiakooupvzkwucucrfdzjvjbqbkgutoybmpfvhbutigdxhfiqfplyciz +cnrhbjdnjftwfwlwzrdkwhajgsizsi +qfntnt +okqyfnbresp +asyg +mjqdkdyggdxzwuzglays +h +ifaqcazoy +fol +vvsusbnugduxsceozmsarbp +epjwtorx +bwiuxxiyc +cw +bwogruhctwkfvbexjnwircykxyzjmats +kygiochfwlpsvmxcgmtjrgvfdptd +q +qmpqe + +z +jghffhqfoecmszunhxmzmzhlmbrvjabhrkihgjmvckhkfpaygjkg + +kfiyfgounmhlvhupswqdgws +ezzdpyqucqoocsdcjtruqpokldfkmjhqzoynirybsifyaxnaxppthjoqy +nwetlgzwrhkhtuubbkbepuhbllxspvagxrqokwnrhkbwdwtp +hlazomrhqogoaxypqaszwfxxmutvbpuuvpdffuqskcbzlwyzcssnflkwiydoveyxjnzllzhyozbsa +hwnitkwbxcyibbqsluuqywbk + +ozpfjsdrc +yoepefuy +lvmspzepnetra +genbrcrmuqfvkaouvuymoxhcxotjjhk +pcshyqgbmqdubsdajnyfqvxkqvywffzn +ukhcbyzwslqeq +otfrmcbnhbyffxqregqoufdxucjunwdhlqqeiiawbxlpqeyzzopfungrryqdykgizrhqodirvazm +dhpfhzyq +cloz +eduupqifolfekve +qiec +ishnjukvomntmdthlkajxpiwk +y +axl +tmyskjqkjsvumizlal +wvvolwewsfxhhdieuagdcuhwsgqvswpbkdkpxskloalmr +ryfmhe +z +mmbpgsyrfvzdatbjrjhuipwt +llzwizmmuulgwocowwmugtaoewkhnqxparvtynlffffdfcocdbba + +pyczkzbmcgrdnxnmezsx +gsqe +mcocxcolcynhpecstsn +opnpplkccobjuhtbhirpzfxuktmpsiwbvsgiaavvdge +wpaldxzasnrbvtugjwytvtfttrh +zxecurevkjiyxy +wtnovebcmglkktic +fdpwfgvlvovxrwh 
+bmwgdullzy +uzwhagxinwqifxjbcntqzqoxkmpqxhe +jrfizsnwxwnnhb +inapddlahrp + +ndtvkceobe +buskgghihdjmjlwfc +j +rkvffxwtmzoeruhlsurwtnuh +cbvkhfepkdishfpqvijzrpleuy +jzdpxjhcgqnybssfegvrnpgyehdqpgjwudbwrjbavp +xzzvgqdrdwajmdmj +vfatwsxvwfdbdhnijdujoyotwvwjipuuetichcfmvgrsnjpqaaezjtkvc +lbfoqgfshrtwgdqufwnfuitdrjydqctqixlzufkdbp +zgau +qefdpmtkecvtj +kuphldkvnzdtpd +dti +fpd +gfrliyegxsb +i +qsddsrmkyfgzrjeqnitmnypbcakh +vfbvbrpuogzhzrbmklvhji +nkz +xlufbaoblbmeub +alwuzxzmobwdukvwnkiwmuqhuxfhevogdnqtmxjptqznrk +cngpoty + +ms +qvenfg +dmeaffm +jycfgnanbmoamhmarkmjcagbp +ysqmbhopgx +jczbzgwedsp + +zxzwjrxcwdtleizjlvifjwgxiibezwxhtzywqdi +mtgnlu +xboxirdchurkfnklnpkapnqfxnhrxyseiujrznjm + +zm +atddskbghcahlhql +szshwzmmvu +befdtpouamwhiisyybispkchpjhownatawjfbx + +ennkzbrlygd +zbt +upphzpdwzmlhhhbqvjsfmbnrar +ddcs +ipbxgzyudjyongtcyygncojdufnufqpdppgvq +gc +isu +foa +wf +jdlvqxgfbowhohhyyngbcs +zjuwjyucdwblatsnywaaoftlcamfbcnw +lzrioesuhoeevczuwrnltmkahfwiu +uicggfbddqltnjyxfltbnaekncnyxsit +zkxsqkqrwrzrxgxbsgxatybfr + +ptvmfyxdcglbfipcguqthjygzqnpqssscukzawynidtchjrrxwuxifoe +w +ohu +vg +zagpowezvbniybgold +lhqseqcxteiqtgnpanpvrmvvlltxh +mtfnxn +wyodtg + +rawpbgtpbaktqzmmpzxmrlwpvvmdsl +widcfbirvswraukbmkhf +vplrueuxomjkqrtjgyxjdkexttzyozawyq +hrpbahllznvmjudzxpbbv +tlavfrxygjfipkupwnbacltcfepeg +icu +otxcu +aewazy +hl + +fmrp +qaacthwzohenzjr +xbyebba +rvkph +mkhhmh +swme +zjmdoypaktglcyzobquunvthcdwegtbywpijxd +jvkuhnxqc +gibhqgjojsxt +bodbktzomiqujtbstqiyquwvqgufphqstenxvddkvtdh +bpusrxkfi +zgp +pmxvgamydyakituvvsucsuidrlznupcsinltmrahulhepxmhoqtfvpjkxzhrrinncuh +jzgkjjhjqykzelaszvcwvvwbnzsxdeaerfnaravk +ynanrqyrxo +zsmuxofullob +brklgrcqefdyoczy +qkpls +snhqumae +iqdtzjadzzvnqvdvjfsaf +nfqfdqiramueblxkaqxbbkxwywzgdbndjjiqk +tc +kp +cpuckbjsxhtxmomfesgxdpz +oseif +ybhxbvyxrpkrexrhjzoaxxohrhsniewsrktjnaztn +ggelspdzhzbchruhbjbjidgjwdlhdycetqaswh +jkgivsngygkbqtlmoj +dwpnanfvitxg +ospxbwxp +wgvmvrnjescemdoiralbkvemalifxnyhrbdgodml 
+hjtsnkzknkplbzsiwmneefdkihnhsamjsrxggclyjqgpqltizi + + +sykgbuypwwhweab +nvdkkkskmtiwpoerkon +sx +sbyflwwiqylbskdlxesmylpaz +dnwcjenaluwesyywfaezznwkdwpoesxpu +kie +dslccwfryol +gfhomgfn +zprjtfqvkotktzidmoyrivall +bunvsqkysdelozemnjoeqfolruulpbipm +ullyzfahpkhkja +hwd +kvyqtprpuulgsk +zotbkcadnxmfvqmtlbxalhughceyfcibtzzj +vvpjbgxygl +hpic +mhrqd +dv +thehuzdbaacoidjoljbysnqwrrxxplrdznmgiukkvjqbopb +moszjt +rmtbunktkywqirveeqfa +kse +wbfflnatgzobjrxghjgvcsyxoruenxhyomutbptswjajawqjpqafpdcstkiyjuilimecgejpqmyciolgcmdpcstzdozbmnza diff -Nru cargo-0.17.0/vendor/aho-corasick-0.6.3/.cargo-checksum.json cargo-0.19.0/vendor/aho-corasick-0.6.3/.cargo-checksum.json --- cargo-0.17.0/vendor/aho-corasick-0.6.3/.cargo-checksum.json 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/aho-corasick-0.6.3/.cargo-checksum.json 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1 @@ +{"files":{".cargo-ok":"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",".gitignore":"d471402ab06e94fb67bda462107845d5b20d9813b6f759fa4ac7f79448f3665c",".travis.yml":"e17babe5ba0bdd19ec59a37b4a099fd4313bff58be63a2ff506075f9a97dc172","COPYING":"01c266bced4a434da0051174d6bee16a4c82cf634e2679b6155d40d75012390f","Cargo.toml":"b3fa06c2147a4749cd984ded69024ddcc8b7d578ab763b60227b3ba474c3ec70","LICENSE-MIT":"0f96a83840e146e43c0ec96a22ec1f392e0680e6c1226e6f3ba87e0740af850f","Makefile":"a45a128685a2ae7d4fa39d310786674417ee113055ef290a11f88002285865fc","README.md":"9bc60d2cec222b50f87c85cf9475349bb228a36f89796c5d6481c52560ddde3a","UNLICENSE":"7e12e5df4bae12cb21581ba157ced20e1986a0508dd10d0e8a4ab9a4cf94e85c","benches/bench.rs":"acf4844efadeafc7bc396c2b16f2a184e140b6c17d1084dbaf454196de2090cd","benches/random.txt":"9386fb3efedc7ffbd09fb49088347f1056bc2d90a861009fa2f804cdb714efcb","ctags.rust":"3d128d3cc59f702e68953ba2fe6c3f46bc6991fc575308db060482d5da0c79f3","examples/dict-search.rs":"30eb44b1a0b599507db4c23a90f74199faabc64a8ae1d603ecdf3bba7428eb1e","session.vim":"95cb1d7caf0ff7fbe76ec911988d908dd
d883381c925ba64b537695bc9f021c4","src/autiter.rs":"98c31a7fbe21cfacaa858f90409f0d86edd46dda1b7651f4e800d929a50afb7b","src/full.rs":"b83a9c8ff3ef611c316b68650915df2d7f361a49b59dab103dc2c5476f2d8303","src/lib.rs":"68bf2ed02d58bebee6f7f7579038f1e4b60a2c4acc334263cb837bcbe15ffe94","src/main.rs":"fc867cb5f0b02d0f49ecab06b72c05a247cbcf3bf9228c235de8e787bda7bef5"},"package":"500909c4f87a9e52355b26626d890833e9e1d53ac566db76c36faa984b889699"} \ No newline at end of file diff -Nru cargo-0.17.0/vendor/aho-corasick-0.6.3/Cargo.toml cargo-0.19.0/vendor/aho-corasick-0.6.3/Cargo.toml --- cargo-0.17.0/vendor/aho-corasick-0.6.3/Cargo.toml 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/aho-corasick-0.6.3/Cargo.toml 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,47 @@ +[package] +name = "aho-corasick" +version = "0.6.3" #:version +authors = ["Andrew Gallant "] +description = "Fast multiple substring searching with finite state machines." +documentation = "http://burntsushi.net/rustdoc/aho_corasick/" +homepage = "https://github.com/BurntSushi/aho-corasick" +repository = "https://github.com/BurntSushi/aho-corasick" +readme = "README.md" +keywords = ["string", "search", "text", "aho", "corasick"] +license = "Unlicense/MIT" +exclude = ["benches/sherlock.txt"] + +[lib] +name = "aho_corasick" + +[[bin]] +name = "aho-corasick-dot" +test = false +doc = false +bench = false + +[dependencies] +memchr = "1" + +[dev-dependencies] +csv = "0.15" +docopt = "0.7" +memmap = "0.5" +quickcheck = { version = "0.4", default-features = false } +rand = "0.3" +rustc-serialize = "0.3" + +[[bench]] +name = "bench" +path = "benches/bench.rs" +test = false +bench = true + +[profile.test] +debug = true + +[profile.bench] +debug = true + +[profile.release] +debug = true diff -Nru cargo-0.17.0/vendor/aho-corasick-0.6.3/COPYING cargo-0.19.0/vendor/aho-corasick-0.6.3/COPYING --- cargo-0.17.0/vendor/aho-corasick-0.6.3/COPYING 1970-01-01 00:00:00.000000000 +0000 +++ 
cargo-0.19.0/vendor/aho-corasick-0.6.3/COPYING 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,3 @@ +This project is dual-licensed under the Unlicense and MIT licenses. + +You may use this code under the terms of either license. diff -Nru cargo-0.17.0/vendor/aho-corasick-0.6.3/ctags.rust cargo-0.19.0/vendor/aho-corasick-0.6.3/ctags.rust --- cargo-0.17.0/vendor/aho-corasick-0.6.3/ctags.rust 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/aho-corasick-0.6.3/ctags.rust 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,11 @@ +--langdef=Rust +--langmap=Rust:.rs +--regex-Rust=/^[ \t]*(#\[[^\]]\][ \t]*)*(pub[ \t]+)?(extern[ \t]+)?("[^"]+"[ \t]+)?(unsafe[ \t]+)?fn[ \t]+([a-zA-Z0-9_]+)/\6/f,functions,function definitions/ +--regex-Rust=/^[ \t]*(pub[ \t]+)?type[ \t]+([a-zA-Z0-9_]+)/\2/T,types,type definitions/ +--regex-Rust=/^[ \t]*(pub[ \t]+)?enum[ \t]+([a-zA-Z0-9_]+)/\2/g,enum,enumeration names/ +--regex-Rust=/^[ \t]*(pub[ \t]+)?struct[ \t]+([a-zA-Z0-9_]+)/\2/s,structure names/ +--regex-Rust=/^[ \t]*(pub[ \t]+)?mod[ \t]+([a-zA-Z0-9_]+)/\2/m,modules,module names/ +--regex-Rust=/^[ \t]*(pub[ \t]+)?static[ \t]+([a-zA-Z0-9_]+)/\2/c,consts,static constants/ +--regex-Rust=/^[ \t]*(pub[ \t]+)?trait[ \t]+([a-zA-Z0-9_]+)/\2/t,traits,traits/ +--regex-Rust=/^[ \t]*(pub[ \t]+)?impl([ \t\n]+<.*>)?[ \t]+([a-zA-Z0-9_]+)/\3/i,impls,trait implementations/ +--regex-Rust=/^[ \t]*macro_rules![ \t]+([a-zA-Z0-9_]+)/\1/d,macros,macro definitions/ diff -Nru cargo-0.17.0/vendor/aho-corasick-0.6.3/examples/dict-search.rs cargo-0.19.0/vendor/aho-corasick-0.6.3/examples/dict-search.rs --- cargo-0.17.0/vendor/aho-corasick-0.6.3/examples/dict-search.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/aho-corasick-0.6.3/examples/dict-search.rs 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,151 @@ +// This example demonstrates how to use the Aho-Corasick algorithm to rapidly +// scan text for matches in a large dictionary of keywords. 
This example by +// default reads your system's dictionary (~120,000 words). +extern crate aho_corasick; +extern crate csv; +extern crate docopt; +extern crate memmap; +extern crate rustc_serialize; + +use std::error::Error; +use std::fs::File; +use std::io::{self, BufRead, Write}; +use std::process; + +use aho_corasick::{Automaton, AcAutomaton, Match}; +use docopt::Docopt; +use memmap::{Mmap, Protection}; + +static USAGE: &'static str = " +Usage: dict-search [options] + dict-search --help + +Options: + -d , --dict Path to dictionary of keywords to search. + [default: /usr/share/dict/words] + -m , --min-len The minimum length for a keyword in UTF-8 + encoded bytes. [default: 5] + --overlapping Report overlapping matches. + -c, --count Show only the numebr of matches. + --memory-usage Show memory usage of automaton. + --full Use fully expanded transition matrix. + Warning: may use lots of memory. + -h, --help Show this usage message. +"; + +#[derive(Clone, Debug, RustcDecodable)] +struct Args { + arg_input: String, + flag_dict: String, + flag_min_len: usize, + flag_overlapping: bool, + flag_memory_usage: bool, + flag_full: bool, + flag_count: bool, +} + +fn main() { + let args: Args = Docopt::new(USAGE) + .and_then(|d| d.decode()) + .unwrap_or_else(|e| e.exit()); + match run(&args) { + Ok(()) => {} + Err(err) => { + writeln!(&mut io::stderr(), "{}", err).unwrap(); + process::exit(1); + } + } +} + +fn run(args: &Args) -> Result<(), Box> { + let aut = try!(build_automaton(&args.flag_dict, args.flag_min_len)); + if args.flag_memory_usage { + let (bytes, states) = if args.flag_full { + let aut = aut.into_full(); + (aut.heap_bytes(), aut.num_states()) + } else { + (aut.heap_bytes(), aut.num_states()) + }; + println!("{} bytes, {} states", bytes, states); + return Ok(()); + } + + if args.flag_full { + let aut = aut.into_full(); + if args.flag_overlapping { + if args.flag_count { + let mmap = Mmap::open_path( + &args.arg_input, Protection::Read).unwrap(); + let text = 
unsafe { mmap.as_slice() }; + println!("{}", aut.find_overlapping(text).count()); + } else { + let rdr = try!(File::open(&args.arg_input)); + try!(write_matches(&aut, aut.stream_find_overlapping(rdr))); + } + } else { + if args.flag_count { + let mmap = Mmap::open_path( + &args.arg_input, Protection::Read).unwrap(); + let text = unsafe { mmap.as_slice() }; + println!("{}", aut.find(text).count()); + } else { + let rdr = try!(File::open(&args.arg_input)); + try!(write_matches(&aut, aut.stream_find(rdr))); + } + } + } else { + if args.flag_overlapping { + if args.flag_count { + let mmap = Mmap::open_path( + &args.arg_input, Protection::Read).unwrap(); + let text = unsafe { mmap.as_slice() }; + println!("{}", aut.find_overlapping(text).count()); + } else { + let rdr = try!(File::open(&args.arg_input)); + try!(write_matches(&aut, aut.stream_find_overlapping(rdr))); + } + } else { + if args.flag_count { + let mmap = Mmap::open_path( + &args.arg_input, Protection::Read).unwrap(); + let text = unsafe { mmap.as_slice() }; + println!("{}", aut.find(text).count()); + } else { + let rdr = try!(File::open(&args.arg_input)); + try!(write_matches(&aut, aut.stream_find(rdr))); + } + } + } + Ok(()) +} + +fn write_matches(aut: &A, it: I) -> Result<(), Box> + where A: Automaton, I: Iterator> { + let mut wtr = csv::Writer::from_writer(io::stdout()); + try!(wtr.write(["pattern", "start", "end"].iter())); + for m in it { + let m = try!(m); + try!(wtr.write([ + aut.pattern(m.pati), + &m.start.to_string(), + &m.end.to_string(), + ].iter())); + } + try!(wtr.flush()); + Ok(()) +} + +fn build_automaton( + dict_path: &str, + min_len: usize, +) -> Result, Box> { + let buf = io::BufReader::new(try!(File::open(dict_path))); + let mut lines = Vec::with_capacity(1 << 10); + for line in buf.lines() { + let line = try!(line); + if line.len() >= min_len { + lines.push(line); + } + } + Ok(AcAutomaton::with_transitions(lines)) +} diff -Nru cargo-0.17.0/vendor/aho-corasick-0.6.3/.gitignore 
cargo-0.19.0/vendor/aho-corasick-0.6.3/.gitignore --- cargo-0.17.0/vendor/aho-corasick-0.6.3/.gitignore 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/aho-corasick-0.6.3/.gitignore 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,9 @@ +.*.swp +doc +tags +examples/ss10pusa.csv +build +target +Cargo.lock +scratch* +bench_large/huge diff -Nru cargo-0.17.0/vendor/aho-corasick-0.6.3/LICENSE-MIT cargo-0.19.0/vendor/aho-corasick-0.6.3/LICENSE-MIT --- cargo-0.17.0/vendor/aho-corasick-0.6.3/LICENSE-MIT 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/aho-corasick-0.6.3/LICENSE-MIT 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2015 Andrew Gallant + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
diff -Nru cargo-0.17.0/vendor/aho-corasick-0.6.3/Makefile cargo-0.19.0/vendor/aho-corasick-0.6.3/Makefile --- cargo-0.17.0/vendor/aho-corasick-0.6.3/Makefile 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/aho-corasick-0.6.3/Makefile 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,14 @@ +all: + echo Nothing to do... + +ctags: + ctags --recurse --options=ctags.rust --languages=Rust + +docs: + cargo doc + in-dir ./target/doc fix-perms + rscp ./target/doc/* gopher:~/www/burntsushi.net/rustdoc/ + +push: + git push origin master + git push github master diff -Nru cargo-0.17.0/vendor/aho-corasick-0.6.3/README.md cargo-0.19.0/vendor/aho-corasick-0.6.3/README.md --- cargo-0.17.0/vendor/aho-corasick-0.6.3/README.md 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/aho-corasick-0.6.3/README.md 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,55 @@ +This crate provides an implementation of the +[Aho-Corasick](http://en.wikipedia.org/wiki/Aho%E2%80%93Corasick_string_matching_algorithm) +algorithm. Its intended use case is for fast substring matching, particularly +when matching multiple substrings in a search text. This is achieved by +compiling the substrings into a finite state machine. + +This implementation provides optimal algorithmic time complexity. Construction +of the finite state machine is `O(p)` where `p` is the length of the substrings +concatenated. Matching against search text is `O(n + p + m)`, where `n` is +the length of the search text and `m` is the number of matches. + +[![Build status](https://api.travis-ci.org/BurntSushi/aho-corasick.png)](https://travis-ci.org/BurntSushi/aho-corasick) +[![](http://meritbadge.herokuapp.com/aho-corasick)](https://crates.io/crates/aho-corasick) + +Dual-licensed under MIT or the [UNLICENSE](http://unlicense.org). + + +### Documentation + +[http://burntsushi.net/rustdoc/aho_corasick/](http://burntsushi.net/rustdoc/aho_corasick/). 
+ + +### Example + +The documentation contains several examples, and there is a more complete +example as a full program in `examples/dict-search.rs`. + +Here is a quick example showing simple substring matching: + +```rust +use aho_corasick::{Automaton, AcAutomaton, Match}; + +let aut = AcAutomaton::new(vec!["apple", "maple"]); +let mut it = aut.find("I like maple apples."); +assert_eq!(it.next(), Some(Match { + pati: 1, + start: 7, + end: 12, +})); +assert_eq!(it.next(), Some(Match { + pati: 0, + start: 13, + end: 18, +})); +assert_eq!(it.next(), None); +``` + + +### Alternatives + +Aho-Corasick is useful for matching multiple substrings against many long +strings. If your long string is fixed, then you might consider building a +[suffix array](https://github.com/BurntSushi/suffix) +of the search text (which takes `O(n)` time). Matches can then be found in +`O(plogn)` time. diff -Nru cargo-0.17.0/vendor/aho-corasick-0.6.3/session.vim cargo-0.19.0/vendor/aho-corasick-0.6.3/session.vim --- cargo-0.17.0/vendor/aho-corasick-0.6.3/session.vim 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/aho-corasick-0.6.3/session.vim 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1 @@ +au BufWritePost *.rs silent!make ctags > /dev/null 2>&1 diff -Nru cargo-0.17.0/vendor/aho-corasick-0.6.3/src/autiter.rs cargo-0.19.0/vendor/aho-corasick-0.6.3/src/autiter.rs --- cargo-0.17.0/vendor/aho-corasick-0.6.3/src/autiter.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/aho-corasick-0.6.3/src/autiter.rs 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,530 @@ +use std::io::{self, BufRead}; +use std::marker::PhantomData; + +use memchr::{memchr, memchr2, memchr3}; + +use super::{ROOT_STATE, StateIdx}; + +/// An abstraction over automatons and their corresponding iterators. +/// The type parameter `P` is the type of the pattern that was used to +/// construct this Automaton. +pub trait Automaton

{ + /// Return the next state given the current state and next character. + fn next_state(&self, si: StateIdx, b: u8) -> StateIdx; + + /// Return true if and only if the given state and current pattern index + /// indicate a match. + fn has_match(&self, si: StateIdx, outi: usize) -> bool; + + /// Build a match given the current state, pattern index and input index. + fn get_match(&self, si: StateIdx, outi: usize, texti: usize) -> Match; + + /// Return the set of bytes that have transitions in the root state. + fn start_bytes(&self) -> &[u8]; + + /// Returns all of the patterns matched by this automaton. + /// + /// The order of the patterns is the order in which they were added. + fn patterns(&self) -> &[P]; + + /// Returns the pattern indexed at `i`. + /// + /// The index corresponds to the position at which the pattern was added + /// to the automaton, starting at `0`. + fn pattern(&self, i: usize) -> &P; + + /// Return the number of patterns in the automaton. + #[inline] + fn len(&self) -> usize { + self.patterns().len() + } + + /// Returns true if the automaton has no patterns. + #[inline] + fn is_empty(&self) -> bool { + self.len() == 0 + } + + /// Returns an iterator of non-overlapping matches in `s`. + fn find<'a, 's, Q: ?Sized + AsRef<[u8]>>( + &'a self, + s: &'s Q, + ) -> Matches<'a, 's, P, Self> + where Self: Sized { + Matches { + aut: self, + text: s.as_ref(), + texti: 0, + si: ROOT_STATE, + _m: PhantomData, + } + } + + /// Returns an iterator of overlapping matches in `s`. + fn find_overlapping<'a, 's, Q: ?Sized + AsRef<[u8]>>( + &'a self, + s: &'s Q, + ) -> MatchesOverlapping<'a, 's, P, Self> + where Self: Sized { + MatchesOverlapping { + aut: self, + text: s.as_ref(), + texti: 0, + si: ROOT_STATE, + outi: 0, + _m: PhantomData, + } + } + + /// Returns an iterator of non-overlapping matches in the given reader. 
+ fn stream_find<'a, R: io::Read>( + &'a self, + rdr: R, + ) -> StreamMatches<'a, R, P, Self> + where Self: Sized { + StreamMatches { + aut: self, + buf: io::BufReader::new(rdr), + texti: 0, + si: ROOT_STATE, + _m: PhantomData, + } + } + + /// Returns an iterator of overlapping matches in the given reader. + fn stream_find_overlapping<'a, R: io::Read>( + &'a self, + rdr: R, + ) -> StreamMatchesOverlapping<'a, R, P, Self> + where Self: Sized { + StreamMatchesOverlapping { + aut: self, + buf: io::BufReader::new(rdr), + texti: 0, + si: ROOT_STATE, + outi: 0, + _m: PhantomData, + } + } +} + +impl<'a, P: AsRef<[u8]>, A: 'a + Automaton

+ ?Sized> + Automaton

for &'a A { + fn next_state(&self, si: StateIdx, b: u8) -> StateIdx { + (**self).next_state(si, b) + } + + fn has_match(&self, si: StateIdx, outi: usize) -> bool { + (**self).has_match(si, outi) + } + + fn start_bytes(&self) -> &[u8] { + (**self).start_bytes() + } + + fn patterns(&self) -> &[P] { + (**self).patterns() + } + + fn pattern(&self, i: usize) -> &P { + (**self).pattern(i) + } + + fn get_match(&self, si: StateIdx, outi: usize, texti: usize) -> Match { + (**self).get_match(si, outi, texti) + } +} + +/// Records a match in the search text. +#[derive(Copy, Clone, Debug, Hash, PartialEq, Eq)] +pub struct Match { + /// The pattern index. + /// + /// This corresponds to the ordering in which the matched pattern was + /// added to the automaton, starting at `0`. + pub pati: usize, + /// The starting byte offset of the match in the search text. + pub start: usize, + /// The ending byte offset of the match in the search text. + /// + /// (This can be re-captiulated with `pati` and adding the pattern's + /// length to `start`, but it is convenient to have it here.) + pub end: usize, +} + +/// An iterator of non-overlapping matches for in-memory text. +/// +/// This iterator yields `Match` values. +/// +/// `'a` is the lifetime of the automaton, `'s` is the lifetime of the +/// search text, and `P` is the type of the Automaton's pattern. +#[derive(Debug)] +pub struct Matches<'a, 's, P, A: 'a + Automaton

+ ?Sized> { + aut: &'a A, + text: &'s [u8], + texti: usize, + si: StateIdx, + _m: PhantomData

, +} + +// When there's an initial lone start byte, it is usually worth it +// to use `memchr` to skip along the input. The problem is that +// the skipping function is called in the inner match loop, which +// can be quite costly if the skipping condition is never met. +// Therefore, we lift the case analysis outside of the inner loop at +// the cost of repeating code. +// +// `step_to_match` is the version of the inner loop without skipping, +// and `skip_to_match` is the version with skipping. +#[inline(never)] +fn step_to_match + ?Sized>( + aut: &A, + text: &[u8], + mut texti: usize, + mut si: StateIdx +) -> Option<(usize, StateIdx)> { + while texti < text.len() { + si = aut.next_state(si, text[texti]); + if aut.has_match(si, 0) { + return Some((texti, si)); + } + texti += 1; + if texti + 4 < text.len() { + si = aut.next_state(si, text[texti]); + if aut.has_match(si, 0) { + return Some((texti, si)); + } + texti += 1; + si = aut.next_state(si, text[texti]); + if aut.has_match(si, 0) { + return Some((texti, si)); + } + texti += 1; + si = aut.next_state(si, text[texti]); + if aut.has_match(si, 0) { + return Some((texti, si)); + } + texti += 1; + si = aut.next_state(si, text[texti]); + if aut.has_match(si, 0) { + return Some((texti, si)); + } + texti += 1; + si = aut.next_state(si, text[texti]); + if aut.has_match(si, 0) { + return Some((texti, si)); + } + texti += 1; + } + } + None +} + +fn skip_to_match + ?Sized, F: Fn(&A, &[u8], usize) -> usize>( + aut: &A, + text: &[u8], + mut texti: usize, + mut si: StateIdx, + skip: F, +) -> Option<(usize, StateIdx)> { + if si == ROOT_STATE { + texti = skip(aut, text, texti); + } + while texti < text.len() { + si = aut.next_state(si, text[texti]); + if aut.has_match(si, 0) { + return Some((texti, si)); + } + if si == ROOT_STATE { + texti = skip(aut, text, texti + 1); + } else { + texti += 1; + if texti + 4 < text.len() { + si = aut.next_state(si, text[texti]); + if aut.has_match(si, 0) { + return Some((texti, si)); + } + 
texti += 1; + si = aut.next_state(si, text[texti]); + if aut.has_match(si, 0) { + return Some((texti, si)); + } + texti += 1; + si = aut.next_state(si, text[texti]); + if aut.has_match(si, 0) { + return Some((texti, si)); + } + texti += 1; + si = aut.next_state(si, text[texti]); + if aut.has_match(si, 0) { + return Some((texti, si)); + } + texti += 1; + si = aut.next_state(si, text[texti]); + if aut.has_match(si, 0) { + return Some((texti, si)); + } + texti += 1; + } + } + } + None +} + +#[inline] +fn skip1 + ?Sized>( + aut: &A, + text: &[u8], + at: usize, +) -> usize { + debug_assert!(aut.start_bytes().len() == 1); + let b = aut.start_bytes()[0]; + match memchr(b, &text[at..]) { + None => text.len(), + Some(i) => at + i, + } +} + +#[inline] +fn skip2 + ?Sized>( + aut: &A, + text: &[u8], + at: usize, +) -> usize { + debug_assert!(aut.start_bytes().len() == 2); + let (b1, b2) = (aut.start_bytes()[0], aut.start_bytes()[1]); + match memchr2(b1, b2, &text[at..]) { + None => text.len(), + Some(i) => at + i, + } +} + +#[inline] +fn skip3 + ?Sized>( + aut: &A, + text: &[u8], + at: usize, +) -> usize { + debug_assert!(aut.start_bytes().len() == 3); + let (b1, b2, b3) = ( + aut.start_bytes()[0], aut.start_bytes()[1], aut.start_bytes()[2], + ); + match memchr3(b1, b2, b3, &text[at..]) { + None => text.len(), + Some(i) => at + i, + } +} + +impl<'a, 's, P, A: Automaton

+ ?Sized> Iterator for Matches<'a, 's, P, A> { + type Item = Match; + + fn next(&mut self) -> Option { + if self.aut.start_bytes().len() == 1 { + let skip = skip_to_match( + self.aut, self.text, self.texti, self.si, skip1); + if let Some((texti, si)) = skip { + self.texti = texti + 1; + self.si = ROOT_STATE; + return Some(self.aut.get_match(si, 0, texti)); + } + } else if self.aut.start_bytes().len() == 2 { + let skip = skip_to_match( + self.aut, self.text, self.texti, self.si, skip2); + if let Some((texti, si)) = skip { + self.texti = texti + 1; + self.si = ROOT_STATE; + return Some(self.aut.get_match(si, 0, texti)); + } + } else if self.aut.start_bytes().len() == 3 { + let skip = skip_to_match( + self.aut, self.text, self.texti, self.si, skip3); + if let Some((texti, si)) = skip { + self.texti = texti + 1; + self.si = ROOT_STATE; + return Some(self.aut.get_match(si, 0, texti)); + } + } else { + let step = step_to_match(self.aut, self.text, self.texti, self.si); + if let Some((texti, si)) = step { + self.texti = texti + 1; + self.si = ROOT_STATE; + return Some(self.aut.get_match(si, 0, texti)); + } + } + None + } +} + +/// An iterator of non-overlapping matches for streaming text. +/// +/// This iterator yields `io::Result` values. +/// +/// `'a` is the lifetime of the automaton, `R` is the type of the underlying +/// `io::Read`er, and P is the type of the Automaton's pattern. +#[derive(Debug)] +pub struct StreamMatches<'a, R, P, A: 'a + Automaton

+ ?Sized> { + aut: &'a A, + buf: io::BufReader, + texti: usize, + si: StateIdx, + _m: PhantomData

, +} + +impl<'a, R: io::Read, P, A: Automaton

> + Iterator for StreamMatches<'a, R, P, A> { + type Item = io::Result; + + fn next(&mut self) -> Option> { + let mut m = None; + let mut consumed = 0; +'LOOP: loop { + self.buf.consume(consumed); + let bs = match self.buf.fill_buf() { + Err(err) => return Some(Err(err)), + Ok(bs) if bs.len() == 0 => break, + Ok(bs) => bs, + }; + consumed = bs.len(); // is shortened if we find a match + for (i, &b) in bs.iter().enumerate() { + self.si = self.aut.next_state(self.si, b); + if self.aut.has_match(self.si, 0) { + m = Some(Ok(self.aut.get_match(self.si, 0, self.texti))); + consumed = i + 1; + self.texti += 1; + self.si = ROOT_STATE; + break 'LOOP; + } + self.texti += 1; + } + } + self.buf.consume(consumed); + m + } +} + +/// An iterator of overlapping matches for in-memory text. +/// +/// This iterator yields `Match` values. +/// +/// `'a` is the lifetime of the automaton, `'s` is the lifetime of the +/// search text, and `P` is the type of the Automaton's pattern. +#[derive(Debug)] +pub struct MatchesOverlapping<'a, 's, P, A: 'a + Automaton

+ ?Sized> { + aut: &'a A, + text: &'s [u8], + texti: usize, + si: StateIdx, + outi: usize, + _m: PhantomData

, +} + +impl<'a, 's, P, A: Automaton

+ ?Sized> + Iterator for MatchesOverlapping<'a, 's, P, A> { + type Item = Match; + + fn next(&mut self) -> Option { + if self.aut.has_match(self.si, self.outi) { + let m = self.aut.get_match(self.si, self.outi, self.texti); + self.outi += 1; + if !self.aut.has_match(self.si, self.outi) { + self.texti += 1; + } + return Some(m); + } + + self.outi = 0; + if self.aut.start_bytes().len() == 1 { + let skip = skip_to_match( + self.aut, self.text, self.texti, self.si, skip1); + if let Some((texti, si)) = skip { + self.texti = texti; + self.si = si; + return self.next(); + } + } else if self.aut.start_bytes().len() == 2 { + let skip = skip_to_match( + self.aut, self.text, self.texti, self.si, skip2); + if let Some((texti, si)) = skip { + self.texti = texti; + self.si = si; + return self.next(); + } + } else if self.aut.start_bytes().len() == 3 { + let skip = skip_to_match( + self.aut, self.text, self.texti, self.si, skip3); + if let Some((texti, si)) = skip { + self.texti = texti; + self.si = si; + return self.next(); + } + } else { + let step = step_to_match(self.aut, self.text, self.texti, self.si); + if let Some((texti, si)) = step { + self.texti = texti; + self.si = si; + return self.next(); + } + } + None + } +} + +/// An iterator of overlapping matches for streaming text. +/// +/// This iterator yields `io::Result` values. +/// +/// `'a` is the lifetime of the automaton, `R` is the type of the underlying +/// `io::Read`er, and P is the type of the Automaton's pattern. +#[derive(Debug)] +pub struct StreamMatchesOverlapping<'a, R, P, A: 'a + Automaton

+ ?Sized> { + aut: &'a A, + buf: io::BufReader, + texti: usize, + si: StateIdx, + outi: usize, + _m: PhantomData

, +} + +impl<'a, R: io::Read, P, A: Automaton

+ ?Sized> + Iterator for StreamMatchesOverlapping<'a, R, P, A> { + type Item = io::Result; + + fn next(&mut self) -> Option> { + if self.aut.has_match(self.si, self.outi) { + let m = self.aut.get_match(self.si, self.outi, self.texti); + self.outi += 1; + if !self.aut.has_match(self.si, self.outi) { + self.texti += 1; + } + return Some(Ok(m)); + } + let mut m = None; + let mut consumed = 0; + self.outi = 0; +'LOOP: loop { + self.buf.consume(consumed); + let bs = match self.buf.fill_buf() { + Err(err) => return Some(Err(err)), + Ok(bs) if bs.len() == 0 => break, + Ok(bs) => bs, + }; + consumed = bs.len(); // is shortened if we find a match + for (i, &b) in bs.iter().enumerate() { + self.si = self.aut.next_state(self.si, b); + if self.aut.has_match(self.si, self.outi) { + m = Some(Ok(self.aut.get_match( + self.si, self.outi, self.texti))); + consumed = i + 1; + self.outi += 1; + if !self.aut.has_match(self.si, self.outi) { + self.texti += 1; + } + break 'LOOP; + } + self.texti += 1; + } + } + self.buf.consume(consumed); + m + } +} diff -Nru cargo-0.17.0/vendor/aho-corasick-0.6.3/src/full.rs cargo-0.19.0/vendor/aho-corasick-0.6.3/src/full.rs --- cargo-0.17.0/vendor/aho-corasick-0.6.3/src/full.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/aho-corasick-0.6.3/src/full.rs 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,136 @@ +use std::fmt; +use std::mem; + +use super::{ + FAIL_STATE, + StateIdx, AcAutomaton, Transitions, Match, + usize_bytes, vec_bytes, +}; +use super::autiter::Automaton; + +/// A complete Aho-Corasick automaton. +/// +/// This uses a single transition matrix that permits each input character +/// to move to the next state with a single lookup in the matrix. +/// +/// This is as fast as it gets, but it is guaranteed to use a lot of memory. +/// Namely, it will use at least `4 * 256 * #states`, where the number of +/// states is capped at length of all patterns concatenated. +#[derive(Clone)] +pub struct FullAcAutomaton

{ + pats: Vec

, + trans: Vec, // row-major, where states are rows + out: Vec>, // indexed by StateIdx + start_bytes: Vec, +} + +impl> FullAcAutomaton

{ + /// Build a new expanded Aho-Corasick automaton from an existing + /// Aho-Corasick automaton. + pub fn new(ac: AcAutomaton) -> FullAcAutomaton

{ + let mut fac = FullAcAutomaton { + pats: vec![], + trans: vec![FAIL_STATE; 256 * ac.states.len()], + out: vec![vec![]; ac.states.len()], + start_bytes: vec![], + }; + fac.build_matrix(&ac); + fac.pats = ac.pats; + fac.start_bytes = ac.start_bytes; + fac + } + + #[doc(hidden)] + pub fn memory_usage(&self) -> usize { + self.pats.iter() + .map(|p| vec_bytes() + p.as_ref().len()) + .fold(0, |a, b| a + b) + + (4 * self.trans.len()) + + self.out.iter() + .map(|v| vec_bytes() + (usize_bytes() * v.len())) + .fold(0, |a, b| a + b) + + self.start_bytes.len() + } + + #[doc(hidden)] + pub fn heap_bytes(&self) -> usize { + self.pats.iter() + .map(|p| mem::size_of::

() + p.as_ref().len()) + .fold(0, |a, b| a + b) + + (4 * self.trans.len()) + + self.out.iter() + .map(|v| vec_bytes() + (usize_bytes() * v.len())) + .fold(0, |a, b| a + b) + + self.start_bytes.len() + } + + fn set(&mut self, si: StateIdx, i: u8, goto: StateIdx) { + let ns = self.num_states(); + self.trans[i as usize * ns + si as usize] = goto; + } + + #[doc(hidden)] + #[inline] + pub fn num_states(&self) -> usize { + self.out.len() + } +} + +impl> Automaton

for FullAcAutomaton

{ + #[inline] + fn next_state(&self, si: StateIdx, i: u8) -> StateIdx { + let at = i as usize * self.num_states() + si as usize; + unsafe { *self.trans.get_unchecked(at) } + } + + #[inline] + fn get_match(&self, si: StateIdx, outi: usize, texti: usize) -> Match { + let pati = self.out[si as usize][outi]; + let patlen = self.pats[pati].as_ref().len(); + let start = texti + 1 - patlen; + Match { + pati: pati, + start: start, + end: start + patlen, + } + } + + #[inline] + fn has_match(&self, si: StateIdx, outi: usize) -> bool { + unsafe { outi < self.out.get_unchecked(si as usize).len() } + } + + #[inline] + fn start_bytes(&self) -> &[u8] { + &self.start_bytes + } + + #[inline] + fn patterns(&self) -> &[P] { + &self.pats + } + + #[inline] + fn pattern(&self, i: usize) -> &P { + &self.pats[i] + } +} + +impl> FullAcAutomaton

{ + fn build_matrix(&mut self, ac: &AcAutomaton) { + for (si, s) in ac.states.iter().enumerate().skip(1) { + for b in (0..256).map(|b| b as u8) { + self.set(si as StateIdx, b, ac.next_state(si as StateIdx, b)); + } + for &pati in &s.out { + self.out[si].push(pati); + } + } + } +} + +impl + fmt::Debug> fmt::Debug for FullAcAutomaton

{ + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "FullAcAutomaton({:?})", self.pats) + } +} diff -Nru cargo-0.17.0/vendor/aho-corasick-0.6.3/src/lib.rs cargo-0.19.0/vendor/aho-corasick-0.6.3/src/lib.rs --- cargo-0.17.0/vendor/aho-corasick-0.6.3/src/lib.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/aho-corasick-0.6.3/src/lib.rs 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,925 @@ +/*! +An implementation of the +[Aho-Corasick string search algorithm](https://en.wikipedia.org/wiki/Aho%E2%80%93Corasick_string_matching_algorithm). + +The Aho-Corasick algorithm is principally useful when you need to search many +large texts for a fixed (possibly large) set of keywords. In particular, the +Aho-Corasick algorithm preprocesses the set of keywords by constructing a +finite state machine. The search phase is then a quick linear scan through the +text. Each character in the search text causes a state transition in the +automaton. Matches are reported when the automaton enters a match state. + +# Examples + +The main type exposed by this crate is `AcAutomaton`, which can be constructed +from an iterator of pattern strings: + +```rust +use aho_corasick::{Automaton, AcAutomaton}; + +let aut = AcAutomaton::new(vec!["apple", "maple"]); + +// AcAutomaton also implements `FromIterator`: +let aut: AcAutomaton<&str> = ["apple", "maple"].iter().cloned().collect(); +``` + +Finding matches can be done with `find`: + +```rust +use aho_corasick::{Automaton, AcAutomaton, Match}; + +let aut = AcAutomaton::new(vec!["apple", "maple"]); +let mut it = aut.find("I like maple apples."); +assert_eq!(it.next(), Some(Match { + pati: 1, + start: 7, + end: 12, +})); +assert_eq!(it.next(), Some(Match { + pati: 0, + start: 13, + end: 18, +})); +assert_eq!(it.next(), None); +``` + +Use `find_overlapping` if you want to report all matches, even if they +overlap with each other. 
+ +```rust +use aho_corasick::{Automaton, AcAutomaton, Match}; + +let aut = AcAutomaton::new(vec!["abc", "a"]); +let matches: Vec<_> = aut.find_overlapping("abc").collect(); +assert_eq!(matches, vec![ + Match { pati: 1, start: 0, end: 1}, Match { pati: 0, start: 0, end: 3 }, +]); + +// Regular `find` will report only one match: +let matches: Vec<_> = aut.find("abc").collect(); +assert_eq!(matches, vec![Match { pati: 1, start: 0, end: 1}]); +``` + +Finally, there are also methods for finding matches on *streams*. Namely, the +search text does not have to live in memory. It's useful to run this on files +that can't fit into memory: + +```no_run +use std::fs::File; + +use aho_corasick::{Automaton, AcAutomaton}; + +let aut = AcAutomaton::new(vec!["foo", "bar", "baz"]); +let rdr = File::open("search.txt").unwrap(); +for m in aut.stream_find(rdr) { + let m = m.unwrap(); // could be an IO error + println!("Pattern '{}' matched at: ({}, {})", + aut.pattern(m.pati), m.start, m.end); +} +``` + +There is also `stream_find_overlapping`, which is just like `find_overlapping`, +but it operates on streams. + +Please see `dict-search.rs` in this crate's `examples` directory for a more +complete example. It creates a large automaton from a dictionary and can do a +streaming match over arbitrarily large data. + +# Memory usage + +A key aspect of an Aho-Corasick implementation is how the state transitions +are represented. The easiest way to make the automaton fast is to store a +sparse 256-slot map in each state. It maps an input byte to a state index. +This makes the matching loop extremely fast, since it translates to a simple +pointer read. + +The problem is that as the automaton accumulates more states, you end up paying +a `256 * 4` (`4` is for the `u32` state index) byte penalty for every state +regardless of how many transitions it has. + +To solve this, only states near the root of the automaton have this sparse +map representation. 
States near the leaves of the automaton use a dense mapping +that requires a linear scan. + +(The specific limit currently set is `3`, so that states with a depth less than +or equal to `3` are less memory efficient. The result is that the memory usage +of the automaton stops growing rapidly past ~60MB, even for automatons with +thousands of patterns.) + +If you'd like to opt for the less-memory-efficient-but-faster version, then +you can construct an `AcAutomaton` with a `Sparse` transition strategy: + +```rust +use aho_corasick::{Automaton, AcAutomaton, Match, Sparse}; + +let aut = AcAutomaton::<&str, Sparse>::with_transitions(vec!["abc", "a"]); +let matches: Vec<_> = aut.find("abc").collect(); +assert_eq!(matches, vec![Match { pati: 1, start: 0, end: 1}]); +``` +*/ + +#![deny(missing_docs)] + +extern crate memchr; +#[cfg(test)] extern crate quickcheck; +#[cfg(test)] extern crate rand; + +use std::collections::VecDeque; +use std::fmt; +use std::iter::FromIterator; +use std::mem; + +pub use self::autiter::{ + Automaton, Match, + Matches, MatchesOverlapping, StreamMatches, StreamMatchesOverlapping, +}; +pub use self::full::FullAcAutomaton; + +// We're specifying paths explicitly so that we can use +// these modules simultaneously from `main.rs`. +// Should probably make just make `main.rs` a separate crate. +#[path = "autiter.rs"] +mod autiter; +#[path = "full.rs"] +mod full; + +/// The integer type used for the state index. +/// +/// Limiting this to 32 bit integers can have a big impact on memory usage +/// when using the `Sparse` transition representation. +pub type StateIdx = u32; + +// Constants for special state indexes. +const FAIL_STATE: u32 = 0; +const ROOT_STATE: u32 = 1; + +// Limit the depth at which we use a sparse alphabet map. Once the limit is +// reached, a dense set is used (and lookup becomes O(n)). +// +// This does have a performance hit, but the (straight forward) alternative +// is to have a `256 * 4` byte overhead for every state. 
+// Given that Aho-Corasick is typically used for dictionary searching, this +// can lead to dramatic memory bloat. +// +// This limit should only be increased at your peril. Namely, in the worst +// case, `256^DENSE_DEPTH_THRESHOLD * 4` corresponds to the memory usage in +// bytes. A value of `1` gives us a good balance. This is also a happy point +// in the benchmarks. A value of `0` gives considerably worse times on certain +// benchmarks (e.g., `ac_ten_one_prefix_byte_every_match`) than even a value +// of `1`. A value of `2` is slightly better than `1` and it looks like gains +// level off at that point with not much observable difference when set to +// `3`. +// +// Why not make this user configurable? Well, it doesn't make much sense +// because we pay for it with case analysis in the matching loop. Increasing it +// doesn't have much impact on performance (outside of pathological cases?). +// +// N.B. Someone else seems to have discovered an alternative, but I haven't +// grokked it yet: https://github.com/mischasan/aho-corasick +const DENSE_DEPTH_THRESHOLD: u32 = 1; + +/// An Aho-Corasick finite automaton. +/// +/// The type parameter `P` is the type of the pattern that was used to +/// construct this AcAutomaton. +#[derive(Clone)] +pub struct AcAutomaton { + pats: Vec

, + states: Vec>, + start_bytes: Vec, +} + +#[derive(Clone)] +struct State { + out: Vec, + fail: StateIdx, + goto: T, + depth: u32, +} + +impl> AcAutomaton

{ + /// Create a new automaton from an iterator of patterns. + /// + /// The patterns must be convertible to bytes (`&[u8]`) via the `AsRef` + /// trait. + pub fn new(pats: I) -> AcAutomaton + where I: IntoIterator { + AcAutomaton::with_transitions(pats) + } +} + +impl, T: Transitions> AcAutomaton { + /// Create a new automaton from an iterator of patterns. + /// + /// This constructor allows one to choose the transition representation. + /// + /// The patterns must be convertible to bytes (`&[u8]`) via the `AsRef` + /// trait. + pub fn with_transitions(pats: I) -> AcAutomaton + where I: IntoIterator { + AcAutomaton { + pats: vec![], // filled in later, avoid wrath of borrow checker + states: vec![State::new(0), State::new(0)], // empty and root + start_bytes: vec![], // also filled in later + }.build(pats.into_iter().collect()) + } + + /// Build out the entire automaton into a single matrix. + /// + /// This will make searching as fast as possible at the expense of using + /// at least `4 * 256 * #states` bytes of memory. + pub fn into_full(self) -> FullAcAutomaton

{ + FullAcAutomaton::new(self) + } + + #[doc(hidden)] + pub fn num_states(&self) -> usize { + self.states.len() + } + + #[doc(hidden)] + pub fn heap_bytes(&self) -> usize { + self.pats.iter() + .map(|p| mem::size_of::

() + p.as_ref().len()) + .fold(0, |a, b| a + b) + + self.states.iter() + .map(|s| mem::size_of::>() + s.heap_bytes()) + .fold(0, |a, b| a + b) + + self.start_bytes.len() + } +} + +impl, T: Transitions> Automaton

for AcAutomaton { + #[inline] + fn next_state(&self, mut si: StateIdx, b: u8) -> StateIdx { + loop { + let maybe_si = self.states[si as usize].goto(b); + if maybe_si != FAIL_STATE { + si = maybe_si; + break; + } else { + si = self.states[si as usize].fail; + } + } + si + } + + #[inline] + fn get_match(&self, si: StateIdx, outi: usize, texti: usize) -> Match { + let pati = self.states[si as usize].out[outi]; + let patlen = self.pats[pati].as_ref().len(); + let start = texti + 1 - patlen; + Match { + pati: pati, + start: start, + end: start + patlen, + } + } + + #[inline] + fn has_match(&self, si: StateIdx, outi: usize) -> bool { + outi < self.states[si as usize].out.len() + } + + #[inline] + fn start_bytes(&self) -> &[u8] { + &self.start_bytes + } + + #[inline] + fn patterns(&self) -> &[P] { + &self.pats + } + + #[inline] + fn pattern(&self, i: usize) -> &P { + &self.pats[i] + } +} + +// Below contains code for *building* the automaton. It's a reasonably faithful +// translation of the description/psuedo-code from: +// http://www.cs.uku.fi/~kilpelai/BSA05/lectures/slides04.pdf + +impl, T: Transitions> AcAutomaton { + // This is the first phase and builds the initial keyword tree. + fn build(mut self, pats: Vec

) -> AcAutomaton { + for (pati, pat) in pats.iter().enumerate() { + if pat.as_ref().is_empty() { + continue; + } + let mut previ = ROOT_STATE; + for &b in pat.as_ref() { + if self.states[previ as usize].goto(b) != FAIL_STATE { + previ = self.states[previ as usize].goto(b); + } else { + let depth = self.states[previ as usize].depth + 1; + let nexti = self.add_state(State::new(depth)); + self.states[previ as usize].set_goto(b, nexti); + previ = nexti; + } + } + self.states[previ as usize].out.push(pati); + } + for c in (0..256).into_iter().map(|c| c as u8) { + if self.states[ROOT_STATE as usize].goto(c) == FAIL_STATE { + self.states[ROOT_STATE as usize].set_goto(c, ROOT_STATE); + } else { + self.start_bytes.push(c); + } + } + // If any of the start bytes are non-ASCII, then remove them all, + // because we don't want to be calling memchr on non-ASCII bytes. + // (Well, we could, but it requires being more clever. Simply using + // the prefix byte isn't good enough.) + if self.start_bytes.iter().any(|&b| b > 0x7F) { + self.start_bytes.clear(); + } + self.pats = pats; + self.fill() + } + + // The second phase that fills in the back links. + fn fill(mut self) -> AcAutomaton { + // Fill up the queue with all non-root transitions out of the root + // node. Then proceed by breadth first traversal. 
+ let mut q = VecDeque::new(); + for c in (0..256).into_iter().map(|c| c as u8) { + let si = self.states[ROOT_STATE as usize].goto(c); + if si != ROOT_STATE { + q.push_front(si); + } + } + while let Some(si) = q.pop_back() { + for c in (0..256).into_iter().map(|c| c as u8) { + let u = self.states[si as usize].goto(c); + if u != FAIL_STATE { + q.push_front(u); + let mut v = self.states[si as usize].fail; + while self.states[v as usize].goto(c) == FAIL_STATE { + v = self.states[v as usize].fail; + } + let ufail = self.states[v as usize].goto(c); + self.states[u as usize].fail = ufail; + let ufail_out = self.states[ufail as usize].out.clone(); + self.states[u as usize].out.extend(ufail_out); + } + } + } + self + } + + fn add_state(&mut self, state: State) -> StateIdx { + let i = self.states.len(); + self.states.push(state); + i as StateIdx + } +} + +impl State { + fn new(depth: u32) -> State { + State { + out: vec![], + fail: 1, + goto: Transitions::new(depth), + depth: depth, + } + } + + fn goto(&self, b: u8) -> StateIdx { + self.goto.goto(b) + } + + fn set_goto(&mut self, b: u8, si: StateIdx) { + self.goto.set_goto(b, si); + } + + fn heap_bytes(&self) -> usize { + (self.out.len() * usize_bytes()) + + self.goto.heap_bytes() + } +} + +/// An abstraction over state transition strategies. +/// +/// This is an attempt to let the caller choose the space/time trade offs +/// used for state transitions. +/// +/// (It's possible that this interface is merely good enough for just the two +/// implementations in this crate.) +pub trait Transitions { + /// Return a new state at the given depth. + fn new(depth: u32) -> Self; + /// Return the next state index given the next character. + fn goto(&self, alpha: u8) -> StateIdx; + /// Set the next state index for the character given. + fn set_goto(&mut self, alpha: u8, si: StateIdx); + /// The memory use in bytes (on the heap) of this set of transitions. 
+ fn heap_bytes(&self) -> usize; +} + +/// State transitions that can be stored either sparsely or densely. +/// +/// This uses less space but at the expense of slower matching. +#[derive(Clone, Debug)] +pub struct Dense(DenseChoice); + +#[derive(Clone, Debug)] +enum DenseChoice { + Sparse(Vec), // indexed by alphabet + Dense(Vec<(u8, StateIdx)>), +} + +impl Transitions for Dense { + fn new(depth: u32) -> Dense { + if depth <= DENSE_DEPTH_THRESHOLD { + Dense(DenseChoice::Sparse(vec![0; 256])) + } else { + Dense(DenseChoice::Dense(vec![])) + } + } + + fn goto(&self, b1: u8) -> StateIdx { + match self.0 { + DenseChoice::Sparse(ref m) => m[b1 as usize], + DenseChoice::Dense(ref m) => { + for &(b2, si) in m { + if b1 == b2 { + return si; + } + } + FAIL_STATE + } + } + } + + fn set_goto(&mut self, b: u8, si: StateIdx) { + match self.0 { + DenseChoice::Sparse(ref mut m) => m[b as usize] = si, + DenseChoice::Dense(ref mut m) => m.push((b, si)), + } + } + + fn heap_bytes(&self) -> usize { + match self.0 { + DenseChoice::Sparse(ref m) => m.len() * 4, + DenseChoice::Dense(ref m) => m.len() * (1 + 4), + } + } +} + +/// State transitions that are always sparse. +/// +/// This can use enormous amounts of memory when there are many patterns, +/// but matching is very fast. +#[derive(Clone, Debug)] +pub struct Sparse(Vec); + +impl Transitions for Sparse { + fn new(_: u32) -> Sparse { + Sparse(vec![0; 256]) + } + + #[inline] + fn goto(&self, b: u8) -> StateIdx { + self.0[b as usize] + } + + fn set_goto(&mut self, b: u8, si: StateIdx) { + self.0[b as usize] = si; + } + + fn heap_bytes(&self) -> usize { + self.0.len() * 4 + } +} + +impl> FromIterator for AcAutomaton { + /// Create an automaton from an iterator of strings. + fn from_iter(it: T) -> AcAutomaton where T: IntoIterator { + AcAutomaton::new(it) + } +} + +// Provide some question debug impls for viewing automatons. +// The custom impls mostly exist for special showing of sparse maps. 
+ +impl + fmt::Debug, T: Transitions> + fmt::Debug for AcAutomaton { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + use std::iter::repeat; + + try!(writeln!(f, "{}", repeat('-').take(79).collect::())); + try!(writeln!(f, "Patterns: {:?}", self.pats)); + for (i, state) in self.states.iter().enumerate().skip(1) { + try!(writeln!(f, "{:3}: {}", i, state.debug(i == 1))); + } + write!(f, "{}", repeat('-').take(79).collect::()) + } +} + +impl State { + fn debug(&self, root: bool) -> String { + format!("State {{ depth: {:?}, out: {:?}, fail: {:?}, goto: {{{}}} }}", + self.depth, self.out, self.fail, self.goto_string(root)) + } + + fn goto_string(&self, root: bool) -> String { + use std::char::from_u32; + + let mut goto = vec![]; + for b in (0..256).map(|b| b as u8) { + let si = self.goto(b); + if (!root && si == FAIL_STATE) || (root && si == ROOT_STATE) { + continue; + } + goto.push(format!("{} => {}", from_u32(b as u32).unwrap(), si)); + } + goto.join(", ") + } +} + +impl fmt::Debug for State { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.debug(false)) + } +} + +impl AcAutomaton { + #[doc(hidden)] + pub fn dot(&self) -> String { + use std::fmt::Write; + let mut out = String::new(); + macro_rules! 
w { + ($w:expr, $($tt:tt)*) => { {write!($w, $($tt)*)}.unwrap() } + } + + w!(out, r#" +digraph automaton {{ + label=<{}>; + labelloc="l"; + labeljust="l"; + rankdir="LR"; +"#, self.pats.join(", ")); + for (i, s) in self.states.iter().enumerate().skip(1) { + let i = i as u32; + if s.out.len() == 0 { + w!(out, " {};\n", i); + } else { + w!(out, " {} [peripheries=2];\n", i); + } + w!(out, " {} -> {} [style=dashed];\n", i, s.fail); + for b in (0..256).map(|b| b as u8) { + let si = s.goto(b); + if si == FAIL_STATE || (i == ROOT_STATE && si == ROOT_STATE) { + continue; + } + w!(out, " {} -> {} [label={}];\n", i, si, b as char); + } + } + w!(out, "}}"); + out + } +} + +fn vec_bytes() -> usize { + usize_bytes() * 3 +} + +fn usize_bytes() -> usize { + let bits = usize::max_value().count_ones() as usize; + bits / 8 +} + +#[cfg(test)] +mod tests { + use std::collections::HashSet; + use std::io; + + use quickcheck::{Arbitrary, Gen, quickcheck}; + + use super::{Automaton, AcAutomaton, Match}; + + fn aut_find(xs: &[S], haystack: &str) -> Vec + where S: Clone + AsRef<[u8]> { + AcAutomaton::new(xs.to_vec()).find(&haystack).collect() + } + + fn aut_finds(xs: &[S], haystack: &str) -> Vec + where S: Clone + AsRef<[u8]> { + let cur = io::Cursor::new(haystack.as_bytes()); + AcAutomaton::new(xs.to_vec()) + .stream_find(cur).map(|r| r.unwrap()).collect() + } + + fn aut_findf(xs: &[S], haystack: &str) -> Vec + where S: Clone + AsRef<[u8]> { + AcAutomaton::new(xs.to_vec()).into_full().find(haystack).collect() + } + + fn aut_findfs(xs: &[S], haystack: &str) -> Vec + where S: Clone + AsRef<[u8]> { + let cur = io::Cursor::new(haystack.as_bytes()); + AcAutomaton::new(xs.to_vec()) + .into_full() + .stream_find(cur).map(|r| r.unwrap()).collect() + } + + fn aut_findo(xs: &[S], haystack: &str) -> Vec + where S: Clone + AsRef<[u8]> { + AcAutomaton::new(xs.to_vec()).find_overlapping(haystack).collect() + } + + fn aut_findos(xs: &[S], haystack: &str) -> Vec + where S: Clone + AsRef<[u8]> { + let cur 
= io::Cursor::new(haystack.as_bytes()); + AcAutomaton::new(xs.to_vec()) + .stream_find_overlapping(cur).map(|r| r.unwrap()).collect() + } + + fn aut_findfo(xs: &[S], haystack: &str) -> Vec + where S: Clone + AsRef<[u8]> { + AcAutomaton::new(xs.to_vec()) + .into_full().find_overlapping(haystack).collect() + } + + fn aut_findfos(xs: &[S], haystack: &str) -> Vec + where S: Clone + AsRef<[u8]> { + let cur = io::Cursor::new(haystack.as_bytes()); + AcAutomaton::new(xs.to_vec()) + .into_full() + .stream_find_overlapping(cur).map(|r| r.unwrap()).collect() + } + + #[test] + fn one_pattern_one_match() { + let ns = vec!["a"]; + let hay = "za"; + let matches = vec![ + Match { pati: 0, start: 1, end: 2 }, + ]; + assert_eq!(&aut_find(&ns, hay), &matches); + assert_eq!(&aut_finds(&ns, hay), &matches); + assert_eq!(&aut_findf(&ns, hay), &matches); + assert_eq!(&aut_findfs(&ns, hay), &matches); + } + + #[test] + fn one_pattern_many_match() { + let ns = vec!["a"]; + let hay = "zazazzzza"; + let matches = vec![ + Match { pati: 0, start: 1, end: 2 }, + Match { pati: 0, start: 3, end: 4 }, + Match { pati: 0, start: 8, end: 9 }, + ]; + assert_eq!(&aut_find(&ns, hay), &matches); + assert_eq!(&aut_finds(&ns, hay), &matches); + assert_eq!(&aut_findf(&ns, hay), &matches); + assert_eq!(&aut_findfs(&ns, hay), &matches); + } + + #[test] + fn one_longer_pattern_one_match() { + let ns = vec!["abc"]; + let hay = "zazabcz"; + let matches = vec![ Match { pati: 0, start: 3, end: 6 } ]; + assert_eq!(&aut_find(&ns, hay), &matches); + assert_eq!(&aut_finds(&ns, hay), &matches); + assert_eq!(&aut_findf(&ns, hay), &matches); + assert_eq!(&aut_findfs(&ns, hay), &matches); + } + + #[test] + fn one_longer_pattern_many_match() { + let ns = vec!["abc"]; + let hay = "zazabczzzzazzzabc"; + let matches = vec![ + Match { pati: 0, start: 3, end: 6 }, + Match { pati: 0, start: 14, end: 17 }, + ]; + assert_eq!(&aut_find(&ns, hay), &matches); + assert_eq!(&aut_finds(&ns, hay), &matches); + assert_eq!(&aut_findf(&ns, 
hay), &matches); + assert_eq!(&aut_findfs(&ns, hay), &matches); + } + + #[test] + fn many_pattern_one_match() { + let ns = vec!["a", "b"]; + let hay = "zb"; + let matches = vec![ Match { pati: 1, start: 1, end: 2 } ]; + assert_eq!(&aut_find(&ns, hay), &matches); + assert_eq!(&aut_finds(&ns, hay), &matches); + assert_eq!(&aut_findf(&ns, hay), &matches); + assert_eq!(&aut_findfs(&ns, hay), &matches); + } + + #[test] + fn many_pattern_many_match() { + let ns = vec!["a", "b"]; + let hay = "zbzazzzzb"; + let matches = vec![ + Match { pati: 1, start: 1, end: 2 }, + Match { pati: 0, start: 3, end: 4 }, + Match { pati: 1, start: 8, end: 9 }, + ]; + assert_eq!(&aut_find(&ns, hay), &matches); + assert_eq!(&aut_finds(&ns, hay), &matches); + assert_eq!(&aut_findf(&ns, hay), &matches); + assert_eq!(&aut_findfs(&ns, hay), &matches); + } + + #[test] + fn many_longer_pattern_one_match() { + let ns = vec!["abc", "xyz"]; + let hay = "zazxyzz"; + let matches = vec![ Match { pati: 1, start: 3, end: 6 } ]; + assert_eq!(&aut_find(&ns, hay), &matches); + assert_eq!(&aut_finds(&ns, hay), &matches); + assert_eq!(&aut_findf(&ns, hay), &matches); + assert_eq!(&aut_findfs(&ns, hay), &matches); + } + + #[test] + fn many_longer_pattern_many_match() { + let ns = vec!["abc", "xyz"]; + let hay = "zazxyzzzzzazzzabcxyz"; + let matches = vec![ + Match { pati: 1, start: 3, end: 6 }, + Match { pati: 0, start: 14, end: 17 }, + Match { pati: 1, start: 17, end: 20 }, + ]; + assert_eq!(&aut_find(&ns, hay), &matches); + assert_eq!(&aut_finds(&ns, hay), &matches); + assert_eq!(&aut_findf(&ns, hay), &matches); + assert_eq!(&aut_findfs(&ns, hay), &matches); + } + + #[test] + fn many_longer_pattern_overlap_one_match() { + let ns = vec!["abc", "bc"]; + let hay = "zazabcz"; + let matches = vec![ + Match { pati: 0, start: 3, end: 6 }, + Match { pati: 1, start: 4, end: 6 }, + ]; + assert_eq!(&aut_findo(&ns, hay), &matches); + assert_eq!(&aut_findos(&ns, hay), &matches); + assert_eq!(&aut_findfo(&ns, hay), 
&matches); + assert_eq!(&aut_findfos(&ns, hay), &matches); + } + + #[test] + fn many_longer_pattern_overlap_one_match_reverse() { + let ns = vec!["abc", "bc"]; + let hay = "xbc"; + let matches = vec![ Match { pati: 1, start: 1, end: 3 } ]; + assert_eq!(&aut_findo(&ns, hay), &matches); + assert_eq!(&aut_findos(&ns, hay), &matches); + assert_eq!(&aut_findfo(&ns, hay), &matches); + assert_eq!(&aut_findfos(&ns, hay), &matches); + } + + #[test] + fn many_longer_pattern_overlap_many_match() { + let ns = vec!["abc", "bc", "c"]; + let hay = "zzzabczzzbczzzc"; + let matches = vec![ + Match { pati: 0, start: 3, end: 6 }, + Match { pati: 1, start: 4, end: 6 }, + Match { pati: 2, start: 5, end: 6 }, + Match { pati: 1, start: 9, end: 11 }, + Match { pati: 2, start: 10, end: 11 }, + Match { pati: 2, start: 14, end: 15 }, + ]; + assert_eq!(&aut_findo(&ns, hay), &matches); + assert_eq!(&aut_findos(&ns, hay), &matches); + assert_eq!(&aut_findfo(&ns, hay), &matches); + assert_eq!(&aut_findfos(&ns, hay), &matches); + } + + #[test] + fn many_longer_pattern_overlap_many_match_reverse() { + let ns = vec!["abc", "bc", "c"]; + let hay = "zzzczzzbczzzabc"; + let matches = vec![ + Match { pati: 2, start: 3, end: 4 }, + Match { pati: 1, start: 7, end: 9 }, + Match { pati: 2, start: 8, end: 9 }, + Match { pati: 0, start: 12, end: 15 }, + Match { pati: 1, start: 13, end: 15 }, + Match { pati: 2, start: 14, end: 15 }, + ]; + assert_eq!(&aut_findo(&ns, hay), &matches); + assert_eq!(&aut_findos(&ns, hay), &matches); + assert_eq!(&aut_findfo(&ns, hay), &matches); + assert_eq!(&aut_findfos(&ns, hay), &matches); + } + + #[test] + fn pattern_returns_original_type() { + let aut = AcAutomaton::new(vec!["apple", "maple"]); + + // Explicitly given this type to assert that the thing returned + // from the function is our original type. + let pat: &str = aut.pattern(0); + assert_eq!(pat, "apple"); + + // Also check the return type of the `patterns` function. 
+ let pats: &[&str] = aut.patterns(); + assert_eq!(pats, &["apple", "maple"]); + } + + // Quickcheck time. + + // This generates very small ascii strings, which makes them more likely + // to interact in interesting ways with larger haystack strings. + #[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord)] + pub struct SmallAscii(String); + + impl Arbitrary for SmallAscii { + fn arbitrary(g: &mut G) -> SmallAscii { + use std::char::from_u32; + SmallAscii((0..2) + .map(|_| from_u32(g.gen_range(97, 123)).unwrap()) + .collect()) + } + + fn shrink(&self) -> Box> { + Box::new(self.0.shrink().map(SmallAscii)) + } + } + + impl From for String { + fn from(s: SmallAscii) -> String { s.0 } + } + + impl AsRef<[u8]> for SmallAscii { + fn as_ref(&self) -> &[u8] { self.0.as_ref() } + } + + // This is the same arbitrary impl as `String`, except it has a bias toward + // ASCII characters. + #[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord)] + pub struct BiasAscii(String); + + impl Arbitrary for BiasAscii { + fn arbitrary(g: &mut G) -> BiasAscii { + use std::char::from_u32; + let size = { let s = g.size(); g.gen_range(0, s) }; + let mut s = String::with_capacity(size); + for _ in 0..size { + if g.gen_weighted_bool(3) { + s.push(char::arbitrary(g)); + } else { + for _ in 0..5 { + s.push(from_u32(g.gen_range(97, 123)).unwrap()); + } + } + } + BiasAscii(s) + } + + fn shrink(&self) -> Box> { + Box::new(self.0.shrink().map(BiasAscii)) + } + } + + fn naive_find(xs: &[S], haystack: &str) -> Vec + where S: Clone + Into { + let needles: Vec = + xs.to_vec().into_iter().map(Into::into).collect(); + let mut matches = vec![]; + for hi in 0..haystack.len() { + for (pati, needle) in needles.iter().enumerate() { + let needle = needle.as_bytes(); + if needle.len() == 0 || needle.len() > haystack.len() - hi { + continue; + } + if needle == &haystack.as_bytes()[hi..hi+needle.len()] { + matches.push(Match { + pati: pati, + start: hi, + end: hi + needle.len(), + }); + } + } + } + matches + } + + 
#[test] + fn qc_ac_equals_naive() { + fn prop(needles: Vec, haystack: BiasAscii) -> bool { + let aut_matches = aut_findo(&needles, &haystack.0); + let naive_matches = naive_find(&needles, &haystack.0); + // Ordering isn't always the same. I don't think we care, so do + // an unordered comparison. + let aset: HashSet = aut_matches.iter().cloned().collect(); + let nset: HashSet = naive_matches.iter().cloned().collect(); + aset == nset + } + quickcheck(prop as fn(Vec, BiasAscii) -> bool); + } +} diff -Nru cargo-0.17.0/vendor/aho-corasick-0.6.3/src/main.rs cargo-0.19.0/vendor/aho-corasick-0.6.3/src/main.rs --- cargo-0.17.0/vendor/aho-corasick-0.6.3/src/main.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/aho-corasick-0.6.3/src/main.rs 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,13 @@ +extern crate memchr; + +use std::env; + +use lib::AcAutomaton; + +#[allow(dead_code)] +mod lib; + +fn main() { + let aut = AcAutomaton::new(env::args().skip(1)); + println!("{}", aut.dot().trim()); +} diff -Nru cargo-0.17.0/vendor/aho-corasick-0.6.3/.travis.yml cargo-0.19.0/vendor/aho-corasick-0.6.3/.travis.yml --- cargo-0.17.0/vendor/aho-corasick-0.6.3/.travis.yml 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/aho-corasick-0.6.3/.travis.yml 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,13 @@ +language: rust +rust: + - 1.12.0 + - stable + - beta + - nightly +script: + - cargo build --verbose + - cargo test --verbose + - cargo doc + - if [ "$TRAVIS_RUST_VERSION" = "nightly" ]; then + cargo bench --verbose; + fi diff -Nru cargo-0.17.0/vendor/aho-corasick-0.6.3/UNLICENSE cargo-0.19.0/vendor/aho-corasick-0.6.3/UNLICENSE --- cargo-0.17.0/vendor/aho-corasick-0.6.3/UNLICENSE 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/aho-corasick-0.6.3/UNLICENSE 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,24 @@ +This is free and unencumbered software released into the public domain. 
+ +Anyone is free to copy, modify, publish, use, compile, sell, or +distribute this software, either in source code form or as a compiled +binary, for any purpose, commercial or non-commercial, and by any +means. + +In jurisdictions that recognize copyright laws, the author or authors +of this software dedicate any and all copyright interest in the +software to the public domain. We make this dedication for the benefit +of the public at large and to the detriment of our heirs and +successors. We intend this dedication to be an overt act of +relinquishment in perpetuity of all present and future rights to this +software under copyright law. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR +OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, +ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +OTHER DEALINGS IN THE SOFTWARE. + +For more information, please refer to diff -Nru cargo-0.17.0/vendor/chrono-0.2.25/AUTHORS.txt cargo-0.19.0/vendor/chrono-0.2.25/AUTHORS.txt --- cargo-0.17.0/vendor/chrono-0.2.25/AUTHORS.txt 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/chrono-0.2.25/AUTHORS.txt 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,26 @@ +Chrono is mainly written by Kang Seonghoon , +and also the following people (in ascending order): + +Alexander Bulaev +Ben Eills +Colin Ray +Corey Farwell +Dan +Danilo Bargen +David Hewson +David Ross +David Willie +Eunchong Yu +Huon Wilson +Jisoo Park +Joe Wilm +John Heitmann +John Nagle +Ken Tossell +Martin Risell Lilja +Ryan Lewis +Sergey V. 
Galtsev +Steve Klabnik +Tom Gallacher +klutzy +kud1ing diff -Nru cargo-0.17.0/vendor/chrono-0.2.25/.cargo-checksum.json cargo-0.19.0/vendor/chrono-0.2.25/.cargo-checksum.json --- cargo-0.17.0/vendor/chrono-0.2.25/.cargo-checksum.json 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/chrono-0.2.25/.cargo-checksum.json 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1 @@ +{"files":{".cargo-ok":"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",".gitignore":"f9b1ca6ae27d1c18215265024629a8960c31379f206d9ed20f64e0b2dcf79805",".travis.yml":"e4721fe8d991d6b9f58da6fba573906a759b58b49ba58cb15b0ca279b3bc53cb","AUTHORS.txt":"4ba13d189cb419382ef9837e74de67c2dba98ff1b378816de2a3f59922da598a","CHANGELOG.md":"5dcce1ee25d1e37a0fa3ce162568061787a13870297d5f95bffa44156efe8701","Cargo.toml":"d7703847fb88c9edcd69a7ce84a4098d21a6dfa2159454067cf9bd56577073ac","LICENSE.txt":"37064a3e3254d5a0c7963d476125e25762e86adadc71959549767486e37c8038","Makefile":"6901ba54f43a90422482457a9237edbee41490b394c09fe5a7d40129db7be5b0","README.md":"d674954a5135c85e2af5e6efa3480b64f16f79dcfface35b01dd837c3b942af6","src/date.rs":"54ccfd7964c1d0ef601c8818bd59c88bf2fb61b51ea78336f190f5e793a47b8d","src/datetime.rs":"400cf1666cfc7224b2e38fbab31236a07f9d75418c9c7b3962d9871e4bda51af","src/div.rs":"bf03964177e2960b0c4726f4856f12b597a59b051241e2a0121501b78825dce8","src/format/mod.rs":"ff50334f39ce537e4745424c8e1a7632a8ec5179fd9047fa0d6cf622d6ce182a","src/format/parse.rs":"0b3ac142ac27b7a32618684b18453e2fd43c7d7d7ddc9b3adbf395c5934e0f1c","src/format/parsed.rs":"6ce9196fa34f29e64a1bc14e76a35b76f4ad5bf72711df8eba2b196aad5ec811","src/format/scan.rs":"ea5ebe5ab966c70f18605edce9a55098ee5f661da1a02b0710559d76067bab79","src/format/strftime.rs":"35ee925171f8c02e876a9b4f515d6ba7eadf3fc8cc914759bee382d5821270e7","src/lib.rs":"1e88f2bdc97130bd6ec3f87bfec4d671167cd66e9daa953b7ce11ceb5ea62928","src/naive/date.rs":"ad4e6b0a4ad939afd79981015d4b2004a33f66abd3c0a3d18a0765d6b87900a1","src/naive/datetime.rs":"317ab
30a8648aef7440da5a813a55a7346c24ff13953436bcae7f6888ed0a0c6","src/naive/time.rs":"dab2c7a6cbd8943a1a775c6c8a9a042fed0dacca623c741871d3969a592d733f","src/offset/fixed.rs":"9f103b5abb3927a19bfeb533db5a695451a5e474ed645c7cf1ac52649bc5fe8a","src/offset/local.rs":"c29fdd66a0dd39f32ded7834479dd8755022a791fb13be1ae5027999a86e4a9e","src/offset/mod.rs":"3e732d056a29585d3eecd74ccdbb38c050d08def9d10f0000a7328761e6c77e6","src/offset/utc.rs":"072b460f6b726877344207e68edc00507e08d8a9168f9f571b6631a0c73ea7be"},"package":"9213f7cd7c27e95c2b57c49f0e69b1ea65b27138da84a170133fd21b07659c00"} \ No newline at end of file diff -Nru cargo-0.17.0/vendor/chrono-0.2.25/Cargo.toml cargo-0.19.0/vendor/chrono-0.2.25/Cargo.toml --- cargo-0.17.0/vendor/chrono-0.2.25/Cargo.toml 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/chrono-0.2.25/Cargo.toml 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,24 @@ +[package] +name = "chrono" +version = "0.2.25" +authors = ["Kang Seonghoon "] + +description = "Date and time library for Rust" +homepage = "https://github.com/lifthrasiir/rust-chrono" +documentation = "https://lifthrasiir.github.io/rust-chrono/" +repository = "https://github.com/lifthrasiir/rust-chrono" +keywords = ["date", "time", "calendar"] +readme = "README.md" +license = "MIT/Apache-2.0" + +[lib] +name = "chrono" + +[dependencies] +time = "0.1" +num = { version = "0.1", default-features = false } +rustc-serialize = { version = "0.3", optional = true } +serde = { version = "<0.9", optional = true } + +[dev-dependencies] +serde_json = { version = ">=0.7.0" } diff -Nru cargo-0.17.0/vendor/chrono-0.2.25/CHANGELOG.md cargo-0.19.0/vendor/chrono-0.2.25/CHANGELOG.md --- cargo-0.17.0/vendor/chrono-0.2.25/CHANGELOG.md 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/chrono-0.2.25/CHANGELOG.md 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,302 @@ +ChangeLog for Chrono +==================== + +This documents all notable changes to 
[Chrono](https://github.com/lifthrasiir/rust-chrono). + +Chrono obeys the principle of [Semantic Versioning](http://semver.org/). + +There were/are numerous minor versions before 1.0 due to the language changes. +Versions with only mechnical changes will be omitted from the following list. + +## 0.2.25 (2016-08-04) + +(0.2.24 was accidentally uploaded without a proper check for warnings in the default state, +and replaced by 0.2.25 very shortly. Duh.) + +### Added + +- Serde 0.8 is now supported. 0.7 also remains supported. (#86) + +### Fixed + +- The deserialization implementation for rustc-serialize now properly verifies the input. + All serialization codes are also now thoroughly tested. (#42) + +## 0.2.23 (2016-08-03) + +### Added + +- The documentation was greatly improved for several types, + and tons of cross-references have been added. (#77, #78, #80, #82) + +- `DateTime::timestamp_subsec_{millis,micros,nanos}` methods have been added. (#81) + +### Fixed + +- When the system time records a leap second, + the nanosecond component was mistakenly reset to zero. (#84) + +- `Local` offset misbehaves in Windows for August and later, + due to the long-standing libtime bug (dates back to mid-2015). + Workaround has been implemented. (#85) + +## 0.2.22 (2016-04-22) + +### Fixed + +- `%.6f` and `%.9f` used to print only three digits when the nanosecond part is zero. (#71) +- The documentation for `%+` has been updated to reflect the current status. (#71) + +## 0.2.21 (2016-03-29) + +### Fixed + +- `Fixed::LongWeekdayName` was unable to recognize `"sunday"` (whoops). (#66) + +## 0.2.20 (2016-03-06) + +### Changed + +- `serde` dependency has been updated to 0.7. (#63, #64) + +## 0.2.19 (2016-02-05) + +### Added + +- The documentation for `Date` is made clear about its ambiguity and guarantees. + +### Fixed + +- `DateTime::date` had been wrong when the local date and the UTC date is in disagreement. 
(#61) + +## 0.2.18 (2016-01-23) + +### Fixed + +- Chrono no longer pulls a superfluous `rand` dependency. (#57) + +## 0.2.17 (2015-11-22) + +### Added + +- Naive date and time types and `DateTime` now have a `serde` support. + They serialize as an ISO 8601 / RFC 3339 string just like `Debug`. (#51) + +## 0.2.16 (2015-09-06) + +### Added + +- Added `%.3f`, `%.6f` and `%.9f` specifier for formatting fractional seconds + up to 3, 6 or 9 decimal digits. This is a natural extension to the existing `%f`. + Note that this is (not yet) generic, no other value of precision is supported. (#45) + +### Changed + +- Forbade unsized types from implementing `Datelike` and `Timelike`. + This does not make a big harm as any type implementing them should be already sized + to be practical, but this change still can break highly generic codes. (#46) + +### Fixed + +- Fixed a broken link in the `README.md`. (#41) + +## 0.2.15 (2015-07-05) + +### Added + +- Padding modifiers `%_?`, `%-?` and `%0?` are implemented. + They are glibc extensions which seem to be reasonably widespread (e.g. Ruby). + +- Added `%:z` specifier and corresponding formatting items + which is essentially same to `%z` but with a colon. + +- Added a new specifier `%.f` which precision adapts from the input. + This was added as a response to the UX problems in the original nanosecond specifier `%f`. + +### Fixed + +- `Numeric::Timestamp` specifier (`%s`) was ignoring the time zone offset when provided. + +- Improved the documentation and associated tests for `strftime`. + +## 0.2.14 (2015-05-15) + +### Fixed + +- `NaiveDateTime +/- Duration` or `NaiveTime +/- Duration` could have gone wrong + when the `Duration` to be added is negative and has a fractional second part. + This was caused by an underflow in the conversion from `Duration` to the parts; + the lack of tests for this case allowed a bug. 
(#37) + +## 0.2.13 (2015-04-29) + +### Added + +- The optional dependency on `rustc_serialize` and + relevant `Rustc{En,De}codable` implementations for supported types has been added. + This is enabled by the `rustc-serialize` Cargo feature. (#34) + +### Changed + +- `chrono::Duration` reexport is changed to that of crates.io `time` crate. + This enables Rust 1.0 beta compatibility. + +## 0.2.4 (2015-03-03) + +### Fixed + +- Clarified the meaning of `Date` and fixed unwanted conversion problem + that only occurs with positive UTC offsets. (#27) + +## 0.2.3 (2015-02-27) + +### Added + +- `DateTime` and `Date` is now `Copy`/`Send` when `Tz::Offset` is `Copy`/`Send`. + The implementations for them were mistakenly omitted. (#25) + +### Fixed + +- `Local::from_utc_datetime` didn't set a correct offset. (#26) + +## 0.2.1 (2015-02-21) + +### Changed + +- `DelayedFormat` no longer conveys a redundant lifetime. + +## 0.2.0 (2015-02-19) + +### Added + +- `Offset` is splitted into `TimeZone` (constructor) and `Offset` (storage) types. + You would normally see only the former, as the latter is mostly an implementation detail. + Most importantly, `Local` now can be used to directly construct timezone-aware values. + + Some types (currently, `UTC` and `FixedOffset`) are both `TimeZone` and `Offset`, + but others aren't (e.g. `Local` is not what is being stored to each `DateTime` values). + +- `LocalResult::map` convenience method has been added. + +- `TimeZone` now allows a construction of `DateTime` values from UNIX timestamp, + via `timestamp` and `timestamp_opt` methods. + +- `TimeZone` now also has a method for parsing `DateTime`, namely `datetime_from_str`. 
+ +- The following methods have been added to all date and time types: + + - `checked_add` + - `checked_sub` + - `format_with_items` + +- The following methods have been added to all timezone-aware types: + + - `timezone` + - `with_timezone` + - `naive_utc` + - `naive_local` + +- `parse_from_str` method has been added to all naive types and `DateTime`. + +- All naive types and instances of `DateTime` with time zones `UTC`, `Local` and `FixedOffset` + implement the `FromStr` trait. They parse what `std::fmt::Debug` would print. + +- `chrono::format` has been greatly rewritten. + + - The formatting syntax parser is modular now, available at `chrono::format::strftime`. + + - The parser and resolution algorithm is also modular, the former is available at + `chrono::format::parse` while the latter is available at `chrono::format::parsed`. + + - Explicit support for RFC 2822 and 3339 syntaxes is landed. + + - There is a minor formatting difference with atypical values, + e.g. for years not between 1 BCE and 9999 CE. + +### Changed + +- Most uses of `Offset` are converted to `TimeZone`. + In fact, *all* user-facing code is expected to be `Offset`-free. + +- `[Naive]DateTime::*num_seconds_from_unix_epoch*` methods have been renamed to + simply `timestamp` or `from_timestamp*`. The original names have been deprecated. + +### Removed + +- `Time` has been removed. This also prompts a related set of methods in `TimeZone`. + + This is in principle possible, but in practice has seen a little use + because it can only be meaningfully constructed via an existing `DateTime` value. + This made many operations to `Time` unintuitive or ambiguous, + so we simply let it go. + + In the case that `Time` is really required, one can use a simpler `NaiveTime`. + `NaiveTime` and `NaiveDate` can be freely combined and splitted, + and `TimeZone::from_{local,utc}_datetime` can be used to convert from/to the local time. + +- `with_offset` method has been removed. 
Use `with_timezone` method instead. + (This is not deprecated since it is an integral part of offset reform.) + +## 0.1.14 (2015-01-10) + +### Added + +- Added a missing `std::fmt::String` impl for `Local`. + +## 0.1.13 (2015-01-10) + +### Changed + +- Most types now implement both `std::fmt::Show` and `std::fmt::String`, + with the former used for the stricter output and the latter used for more casual output. + +### Removed + +- `Offset::name` has been replaced by a `std::fmt::String` implementation to `Offset`. + +## 0.1.12 (2015-01-08) + +### Removed + +- `Duration + T` no longer works due to the updated impl reachability rules. + Use `T + Duration` as a workaround. + +## 0.1.4 (2014-12-13) + +### Fixed + +- Fixed a bug that `Date::and_*` methods with an offset that can change the date are + off by one day. + +## 0.1.3 (2014-11-28) + +### Added + +- `{Date,Time,DateTime}::with_offset` methods have been added. + +- `LocalResult` now implements a common set of traits. + +- `LocalResult::and_*` methods have been added. + They are useful for safely chaining `LocalResult>` methods + to make `LocalResult>`. + +### Changed + +- `Offset::name` now returns `SendStr`. + +- `{Date,Time} - Duration` overloadings are now allowed. + +## 0.1.2 (2014-11-24) + +### Added + +- `Duration + Date` overloading is now allowed. + +### Changed + +- Chrono no longer needs `num` dependency. + +## 0.1.0 (2014-11-20) + +The initial version that was available to `crates.io`. 
+ diff -Nru cargo-0.17.0/vendor/chrono-0.2.25/.gitignore cargo-0.19.0/vendor/chrono-0.2.25/.gitignore --- cargo-0.17.0/vendor/chrono-0.2.25/.gitignore 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/chrono-0.2.25/.gitignore 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,2 @@ +target +Cargo.lock diff -Nru cargo-0.17.0/vendor/chrono-0.2.25/LICENSE.txt cargo-0.19.0/vendor/chrono-0.2.25/LICENSE.txt --- cargo-0.17.0/vendor/chrono-0.2.25/LICENSE.txt 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/chrono-0.2.25/LICENSE.txt 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,239 @@ +Rust-chrono is dual-licensed under The MIT License [1] and +Apache 2.0 License [2]. Copyright (c) 2014, Kang Seonghoon. + +Nota Bene: This is same as the Rust Project's own license. + + +[1]: , which is reproduced below: + +~~~~ +The MIT License (MIT) + +Copyright (c) 2014, Kang Seonghoon. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
+~~~~ + + +[2]: , which is reproduced below: + +~~~~ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + +4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the 
following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + +Copyright [yyyy] [name of copyright owner] + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +~~~~ + diff -Nru cargo-0.17.0/vendor/chrono-0.2.25/Makefile cargo-0.19.0/vendor/chrono-0.2.25/Makefile --- cargo-0.17.0/vendor/chrono-0.2.25/Makefile 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/chrono-0.2.25/Makefile 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,60 @@ +# this Makefile is mostly for the packaging convenience. +# casual users should use `cargo` to retrieve the appropriate version of Chrono. + +.PHONY: all +all: + @echo 'Try `cargo build` instead.' 
+ +.PHONY: authors +authors: + echo 'Chrono is mainly written by Kang Seonghoon ,' > AUTHORS.txt + echo 'and also the following people (in ascending order):' >> AUTHORS.txt + echo >> AUTHORS.txt + git log --format='%aN <%aE>' | grep -v 'Kang Seonghoon' | sort -u >> AUTHORS.txt + +.PHONY: readme +readme: README.md + +README.md: src/lib.rs + # really, really sorry for this mess. + awk '/^\/\/! # Chrono /{print "[Chrono][doc]",$$4}' $< > $@ + awk '/^\/\/! # Chrono /{print "[Chrono][doc]",$$4}' $< | sed 's/./=/g' >> $@ + echo >> $@ + echo '[![Chrono on Travis CI][travis-image]][travis]' >> $@ + echo >> $@ + echo '[travis-image]: https://travis-ci.org/lifthrasiir/rust-chrono.png' >> $@ + echo '[travis]: https://travis-ci.org/lifthrasiir/rust-chrono' >> $@ + awk '/^\/\/! # Chrono /,/^\/\/! ## /' $< | cut -b 5- | grep -v '^#' | \ + sed 's/](\.\//](https:\/\/lifthrasiir.github.io\/rust-chrono\/chrono\//g' >> $@ + echo '[Complete Documentation][doc]' >> $@ + echo >> $@ + echo '[doc]: https://lifthrasiir.github.io/rust-chrono/' >> $@ + echo >> $@ + awk '/^\/\/! ## /,!/^\/\/!/' $< | cut -b 5- | grep -v '^# ' | \ + sed 's/](\.\//](https:\/\/lifthrasiir.github.io\/rust-chrono\/chrono\//g' >> $@ + +.PHONY: test +test: + cargo test --features 'serde rustc-serialize' + +.PHONY: doc +doc: authors readme + cargo doc --features 'serde rustc-serialize' + +.PHONY: doc-publish +doc-publish: doc + ( \ + PKGID="$$(cargo pkgid)"; \ + PKGNAMEVER="$${PKGID#*#}"; \ + PKGNAME="$${PKGNAMEVER%:*}"; \ + REMOTE="$$(git config --get remote.origin.url)"; \ + cd target/doc && \ + rm -rf .git && \ + git init && \ + git checkout --orphan gh-pages && \ + echo '' > index.html && \ + git add . && \ + git commit -m 'updated docs.' 
&& \ + git push "$$REMOTE" gh-pages -f; \ + ) + diff -Nru cargo-0.17.0/vendor/chrono-0.2.25/README.md cargo-0.19.0/vendor/chrono-0.2.25/README.md --- cargo-0.17.0/vendor/chrono-0.2.25/README.md 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/chrono-0.2.25/README.md 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,322 @@ +[Chrono][doc] 0.2.25 +==================== + +[![Chrono on Travis CI][travis-image]][travis] + +[travis-image]: https://travis-ci.org/lifthrasiir/rust-chrono.png +[travis]: https://travis-ci.org/lifthrasiir/rust-chrono + +Date and time handling for Rust. (also known as `rust-chrono`) +It aims to be a feature-complete superset of +the [time](https://github.com/rust-lang-deprecated/time) library. +In particular, + +* Chrono strictly adheres to ISO 8601. +* Chrono is timezone-aware by default, with separate timezone-naive types. +* Chrono is space-optimal and (while not being the primary goal) reasonably efficient. + +There were several previous attempts to bring a good date and time library to Rust, +which Chrono builts upon and should acknowledge: + +* [Initial research on + the wiki](https://github.com/rust-lang/rust-wiki-backup/blob/master/Lib-datetime.md) +* Dietrich Epp's [datetime-rs](https://github.com/depp/datetime-rs) +* Luis de Bethencourt's [rust-datetime](https://github.com/luisbg/rust-datetime) + +[Complete Documentation][doc] + +[doc]: https://lifthrasiir.github.io/rust-chrono/ + +## Usage + +Put this in your `Cargo.toml`: + +```toml +[dependencies] +chrono = "0.2" +``` + +Or, if you want [Serde](https://github.com/serde-rs/serde) or +[rustc-serialize](https://github.com/rust-lang-nursery/rustc-serialize) support, +include the features like this: + +```toml +[dependencies] +chrono = { version = "0.2", features = ["serde", "rustc-serialize"] } +``` + +Then put this in your crate root: + +```rust +extern crate chrono; +``` + +## Overview + +### Duration + 
+[**`Duration`**](https://lifthrasiir.github.io/rust-chrono/chrono/struct.Duration.html) +represents the magnitude of a time span. `Duration` used to be provided by Chrono. +It has been moved to the `time` crate as the +[`time::Duration`](https://doc.rust-lang.org/time/time/struct.Duration.html) type, but is +still re-exported from Chrono. + +### Date and Time + +Chrono provides a +[**`DateTime`**](https://lifthrasiir.github.io/rust-chrono/chrono/datetime/struct.DateTime.html) +type to represent a date and a time in a timezone. + +For more abstract moment-in-time tracking such as internal timekeeping +that is unconcerned with timezones, consider +[`time::SystemTime`](https://doc.rust-lang.org/std/time/struct.SystemTime.html), +which tracks your system clock, or +[`time::Instant`](https://doc.rust-lang.org/std/time/struct.Instant.html), which +is an opaque but monotonically-increasing representation of a moment in time. + +`DateTime` is timezone-aware and must be constructed from +the [**`TimeZone`**](https://lifthrasiir.github.io/rust-chrono/chrono/offset/trait.TimeZone.html) object, +which defines how the local date is converted to and back from the UTC date. +There are three well-known `TimeZone` implementations: + +* [**`UTC`**](https://lifthrasiir.github.io/rust-chrono/chrono/offset/utc/struct.UTC.html) specifies the UTC time zone. It is most efficient. + +* [**`Local`**](https://lifthrasiir.github.io/rust-chrono/chrono/offset/local/struct.Local.html) specifies the system local time zone. + +* [**`FixedOffset`**](https://lifthrasiir.github.io/rust-chrono/chrono/offset/fixed/struct.FixedOffset.html) specifies + an arbitrary, fixed time zone such as UTC+09:00 or UTC-10:30. + This often results from the parsed textual date and time. + Since it stores the most information and does not depend on the system environment, + you would want to normalize other `TimeZone`s into this type. 
+ +`DateTime`s with different `TimeZone` types are distinct and do not mix, +but can be converted to each other using +the [`DateTime::with_timezone`](https://lifthrasiir.github.io/rust-chrono/chrono/datetime/struct.DateTime.html#method.with_timezone) method. + +You can get the current date and time in the UTC time zone +([`UTC::now()`](https://lifthrasiir.github.io/rust-chrono/chrono/offset/utc/struct.UTC.html#method.now)) +or in the local time zone +([`Local::now()`](https://lifthrasiir.github.io/rust-chrono/chrono/offset/local/struct.Local.html#method.now)). + +~~~~ {.rust} +use chrono::*; + +let utc: DateTime = UTC::now(); // e.g. `2014-11-28T12:45:59.324310806Z` +let local: DateTime = Local::now(); // e.g. `2014-11-28T21:45:59.324310806+09:00` +~~~~ + +Alternatively, you can create your own date and time. +This is a bit verbose due to Rust's lack of function and method overloading, +but in turn we get a rich combination of initialization methods. + +~~~~ {.rust} +use chrono::*; + +let dt = UTC.ymd(2014, 7, 8).and_hms(9, 10, 11); // `2014-07-08T09:10:11Z` +// July 8 is 188th day of the year 2014 (`o` for "ordinal") +assert_eq!(dt, UTC.yo(2014, 189).and_hms(9, 10, 11)); +// July 8 is Tuesday in ISO week 28 of the year 2014. +assert_eq!(dt, UTC.isoywd(2014, 28, Weekday::Tue).and_hms(9, 10, 11)); + +let dt = UTC.ymd(2014, 7, 8).and_hms_milli(9, 10, 11, 12); // `2014-07-08T09:10:11.012Z` +assert_eq!(dt, UTC.ymd(2014, 7, 8).and_hms_micro(9, 10, 11, 12_000)); +assert_eq!(dt, UTC.ymd(2014, 7, 8).and_hms_nano(9, 10, 11, 12_000_000)); + +// dynamic verification +assert_eq!(UTC.ymd_opt(2014, 7, 8).and_hms_opt(21, 15, 33), + LocalResult::Single(UTC.ymd(2014, 7, 8).and_hms(21, 15, 33))); +assert_eq!(UTC.ymd_opt(2014, 7, 8).and_hms_opt(80, 15, 33), LocalResult::None); +assert_eq!(UTC.ymd_opt(2014, 7, 38).and_hms_opt(21, 15, 33), LocalResult::None); + +// other time zone objects can be used to construct a local datetime. 
+// obviously, `local_dt` is normally different from `dt`, but `fixed_dt` should be identical. +let local_dt = Local.ymd(2014, 7, 8).and_hms_milli(9, 10, 11, 12); +let fixed_dt = FixedOffset::east(9 * 3600).ymd(2014, 7, 8).and_hms_milli(18, 10, 11, 12); +assert_eq!(dt, fixed_dt); +~~~~ + +Various properties are available to the date and time, and can be altered individually. +Most of them are defined in the traits [`Datelike`](https://lifthrasiir.github.io/rust-chrono/chrono/trait.Datelike.html) and +[`Timelike`](https://lifthrasiir.github.io/rust-chrono/chrono/trait.Timelike.html) which you should `use` before. +Addition and subtraction is also supported. +The following illustrates most supported operations to the date and time: + +~~~~ {.rust} +use chrono::*; + +// assume this returned `2014-11-28T21:45:59.324310806+09:00`: +let dt = Local::now(); + +// property accessors +assert_eq!((dt.year(), dt.month(), dt.day()), (2014, 11, 28)); +assert_eq!((dt.month0(), dt.day0()), (10, 27)); // for unfortunate souls +assert_eq!((dt.hour(), dt.minute(), dt.second()), (21, 45, 59)); +assert_eq!(dt.weekday(), Weekday::Fri); +assert_eq!(dt.weekday().number_from_monday(), 5); // Mon=1, ..., Sat=7 +assert_eq!(dt.ordinal(), 332); // the day of year +assert_eq!(dt.num_days_from_ce(), 735565); // the number of days from and including Jan 1, 1 + +// time zone accessor and manipulation +assert_eq!(dt.offset().local_minus_utc(), Duration::hours(9)); +assert_eq!(dt.timezone(), FixedOffset::east(9 * 3600)); +assert_eq!(dt.with_timezone(&UTC), UTC.ymd(2014, 11, 28).and_hms_nano(12, 45, 59, 324310806)); + +// a sample of property manipulations (validates dynamically) +assert_eq!(dt.with_day(29).unwrap().weekday(), Weekday::Sat); // 2014-11-29 is Saturday +assert_eq!(dt.with_day(32), None); +assert_eq!(dt.with_year(-300).unwrap().num_days_from_ce(), -109606); // November 29, 301 BCE + +// arithmetic operations +assert_eq!(UTC.ymd(2014, 11, 14).and_hms(8, 9, 10) - UTC.ymd(2014, 11, 
14).and_hms(10, 9, 8), + Duration::seconds(-2 * 3600 + 2)); +assert_eq!(UTC.ymd(1970, 1, 1).and_hms(0, 0, 0) + Duration::seconds(1_000_000_000), + UTC.ymd(2001, 9, 9).and_hms(1, 46, 40)); +assert_eq!(UTC.ymd(1970, 1, 1).and_hms(0, 0, 0) - Duration::seconds(1_000_000_000), + UTC.ymd(1938, 4, 24).and_hms(22, 13, 20)); +~~~~ + +Formatting is done via the [`format`](https://lifthrasiir.github.io/rust-chrono/chrono/datetime/struct.DateTime.html#method.format) method, +which format is equivalent to the familiar `strftime` format. +(See the [`format::strftime` module documentation](https://lifthrasiir.github.io/rust-chrono/chrono/format/strftime/index.html#specifiers) +for full syntax.) + +The default `to_string` method and `{:?}` specifier also give a reasonable representation. +Chrono also provides [`to_rfc2822`](https://lifthrasiir.github.io/rust-chrono/chrono/datetime/struct.DateTime.html#method.to_rfc2822) and +[`to_rfc3339`](https://lifthrasiir.github.io/rust-chrono/chrono/datetime/struct.DateTime.html#method.to_rfc3339) methods +for well-known formats. + +~~~~ {.rust} +use chrono::*; + +let dt = UTC.ymd(2014, 11, 28).and_hms(12, 0, 9); +assert_eq!(dt.format("%Y-%m-%d %H:%M:%S").to_string(), "2014-11-28 12:00:09"); +assert_eq!(dt.format("%a %b %e %T %Y").to_string(), "Fri Nov 28 12:00:09 2014"); +assert_eq!(dt.format("%a %b %e %T %Y").to_string(), dt.format("%c").to_string()); + +assert_eq!(dt.to_string(), "2014-11-28 12:00:09 UTC"); +assert_eq!(dt.to_rfc2822(), "Fri, 28 Nov 2014 12:00:09 +0000"); +assert_eq!(dt.to_rfc3339(), "2014-11-28T12:00:09+00:00"); +assert_eq!(format!("{:?}", dt), "2014-11-28T12:00:09Z"); +~~~~ + +Parsing can be done with three methods: + +1. The standard [`FromStr`](https://doc.rust-lang.org/std/str/trait.FromStr.html) trait + (and [`parse`](https://doc.rust-lang.org/std/primitive.str.html#method.parse) method + on a string) can be used for parsing `DateTime`, `DateTime` and + `DateTime` values. 
This parses what the `{:?}` + ([`std::fmt::Debug`](https://doc.rust-lang.org/std/fmt/trait.Debug.html)) + format specifier prints, and requires the offset to be present. + +2. [`DateTime::parse_from_str`](https://lifthrasiir.github.io/rust-chrono/chrono/datetime/struct.DateTime.html#method.parse_from_str) parses + a date and time with offsets and returns `DateTime`. + This should be used when the offset is a part of input and the caller cannot guess that. + It *cannot* be used when the offset can be missing. + [`DateTime::parse_from_rfc2822`](https://lifthrasiir.github.io/rust-chrono/chrono/datetime/struct.DateTime.html#method.parse_from_rfc2822) + and + [`DateTime::parse_from_rfc3339`](https://lifthrasiir.github.io/rust-chrono/chrono/datetime/struct.DateTime.html#method.parse_from_rfc3339) + are similar but for well-known formats. + +3. [`Offset::datetime_from_str`](https://lifthrasiir.github.io/rust-chrono/chrono/offset/trait.TimeZone.html#method.datetime_from_str) is + similar but returns `DateTime` of given offset. + When the explicit offset is missing from the input, it simply uses given offset. + It issues an error when the input contains an explicit offset different + from the current offset. + +More detailed control over the parsing process is available via +[`format`](https://lifthrasiir.github.io/rust-chrono/chrono/format/index.html) module. 
+ +~~~~ {.rust} +use chrono::*; + +let dt = UTC.ymd(2014, 11, 28).and_hms(12, 0, 9); +let fixed_dt = dt.with_timezone(&FixedOffset::east(9*3600)); + +// method 1 +assert_eq!("2014-11-28T12:00:09Z".parse::>(), Ok(dt.clone())); +assert_eq!("2014-11-28T21:00:09+09:00".parse::>(), Ok(dt.clone())); +assert_eq!("2014-11-28T21:00:09+09:00".parse::>(), Ok(fixed_dt.clone())); + +// method 2 +assert_eq!(DateTime::parse_from_str("2014-11-28 21:00:09 +09:00", "%Y-%m-%d %H:%M:%S %z"), + Ok(fixed_dt.clone())); +assert_eq!(DateTime::parse_from_rfc2822("Fri, 28 Nov 2014 21:00:09 +0900"), + Ok(fixed_dt.clone())); +assert_eq!(DateTime::parse_from_rfc3339("2014-11-28T21:00:09+09:00"), Ok(fixed_dt.clone())); + +// method 3 +assert_eq!(UTC.datetime_from_str("2014-11-28 12:00:09", "%Y-%m-%d %H:%M:%S"), Ok(dt.clone())); +assert_eq!(UTC.datetime_from_str("Fri Nov 28 12:00:09 2014", "%a %b %e %T %Y"), Ok(dt.clone())); + +// oops, the year is missing! +assert!(UTC.datetime_from_str("Fri Nov 28 12:00:09", "%a %b %e %T %Y").is_err()); +// oops, the format string does not include the year at all! +assert!(UTC.datetime_from_str("Fri Nov 28 12:00:09", "%a %b %e %T").is_err()); +// oops, the weekday is incorrect! +assert!(UTC.datetime_from_str("Sat Nov 28 12:00:09 2014", "%a %b %e %T %Y").is_err()); +~~~~ + +### Individual date + +Chrono also provides an individual date type ([**`Date`**](https://lifthrasiir.github.io/rust-chrono/chrono/date/struct.Date.html)). +It also has time zones attached, and have to be constructed via time zones. +Most operations available to `DateTime` are also available to `Date` whenever appropriate. 
+ +~~~~ {.rust} +use chrono::*; + +assert_eq!(UTC::today(), UTC::now().date()); +assert_eq!(Local::today(), Local::now().date()); + +assert_eq!(UTC.ymd(2014, 11, 28).weekday(), Weekday::Fri); +assert_eq!(UTC.ymd_opt(2014, 11, 31), LocalResult::None); +assert_eq!(UTC.ymd(2014, 11, 28).and_hms_milli(7, 8, 9, 10).format("%H%M%S").to_string(), + "070809"); +~~~~ + +There is no timezone-aware `Time` due to the lack of usefulness and also the complexity. + +`DateTime` has [`date`](https://lifthrasiir.github.io/rust-chrono/chrono/datetime/struct.DateTime.html#method.date) method +which returns a `Date` which represents its date component. +There is also a [`time`](https://lifthrasiir.github.io/rust-chrono/chrono/datetime/struct.DateTime.html#method.time) method, +which simply returns a naive local time described below. + +### Naive date and time + +Chrono provides naive counterparts to `Date`, (non-existent) `Time` and `DateTime` +as [**`NaiveDate`**](https://lifthrasiir.github.io/rust-chrono/chrono/naive/date/struct.NaiveDate.html), +[**`NaiveTime`**](https://lifthrasiir.github.io/rust-chrono/chrono/naive/time/struct.NaiveTime.html) and +[**`NaiveDateTime`**](https://lifthrasiir.github.io/rust-chrono/chrono/naive/datetime/struct.NaiveDateTime.html) respectively. + +They have almost equivalent interfaces as their timezone-aware twins, +but are not associated to time zones obviously and can be quite low-level. +They are mostly useful for building blocks for higher-level types. + +Timezone-aware `DateTime` and `Date` types have two methods returning naive versions: +[`naive_local`](https://lifthrasiir.github.io/rust-chrono/chrono/datetime/struct.DateTime.html#method.naive_local) returns +a view to the naive local time, +and [`naive_utc`](https://lifthrasiir.github.io/rust-chrono/chrono/datetime/struct.DateTime.html#method.naive_utc) returns +a view to the naive UTC time. + +## Limitations + +Only proleptic Gregorian calendar (i.e. 
extended to support older dates) is supported. +Be very careful if you really have to deal with pre-20C dates, they can be in Julian or others. + +Date types are limited in about +/- 262,000 years from the common epoch. +Time types are limited in the nanosecond accuracy. + +[Leap seconds are supported in the representation but +Chrono doesn't try to make use of them](https://lifthrasiir.github.io/rust-chrono/chrono/naive/time/index.html#leap-second-handling). +(The main reason is that leap seconds are not really predictable.) +Almost *every* operation over the possible leap seconds will ignore them. +Consider using `NaiveDateTime` with the implicit TAI (International Atomic Time) scale +if you want. + +Chrono inherently does not support an inaccurate or partial date and time representation. +Any operation that can be ambiguous will return `None` in such cases. +For example, "a month later" of 2014-01-30 is not well-defined +and consequently `UTC.ymd(2014, 1, 30).with_month(2)` returns `None`. + +Advanced time zone handling is not yet supported (but is planned in 0.3). + diff -Nru cargo-0.17.0/vendor/chrono-0.2.25/src/date.rs cargo-0.19.0/vendor/chrono-0.2.25/src/date.rs --- cargo-0.17.0/vendor/chrono-0.2.25/src/date.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/chrono-0.2.25/src/date.rs 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,482 @@ +// This is a part of rust-chrono. +// Copyright (c) 2014-2015, Kang Seonghoon. +// See README.md and LICENSE.txt for details. + +/*! + * ISO 8601 calendar date with time zone. + */ + +use std::{fmt, hash}; +use std::cmp::Ordering; +use std::ops::{Add, Sub}; + +use {Weekday, Datelike}; +use duration::Duration; +use offset::{TimeZone, Offset}; +use offset::utc::UTC; +use naive; +use naive::date::NaiveDate; +use naive::time::NaiveTime; +use datetime::DateTime; +use format::{Item, DelayedFormat, StrftimeItems}; + +/// ISO 8601 calendar date with time zone. 
+/// +/// This type should be considered ambiguous at best, +/// due to the inherent lack of precision required for the time zone resolution. +/// There are some guarantees on the usage of `Date`: +/// +/// - If properly constructed via `TimeZone::ymd` and others without an error, +/// the corresponding local date should exist for at least a moment. +/// (It may still have a gap from the offset changes.) +/// +/// - The `TimeZone` is free to assign *any* `Offset` to the local date, +/// as long as that offset did occur in given day. +/// For example, if `2015-03-08T01:59-08:00` is followed by `2015-03-08T03:00-07:00`, +/// it may produce either `2015-03-08-08:00` or `2015-03-08-07:00` +/// but *not* `2015-03-08+00:00` and others. +/// +/// - Once constructed as a full `DateTime`, +/// `DateTime::date` and other associated methods should return those for the original `Date`. +/// For example, if `dt = tz.ymd(y,m,d).hms(h,n,s)` were valid, `dt.date() == tz.ymd(y,m,d)`. +/// +/// - The date is timezone-agnostic up to one day (i.e. practically always), +/// so the local date and UTC date should be equal for most cases +/// even though the raw calculation between `NaiveDate` and `Duration` may not. +#[derive(Clone)] +pub struct Date { + date: NaiveDate, + offset: Tz::Offset, +} + +/// The minimum possible `Date`. +pub const MIN: Date = Date { date: naive::date::MIN, offset: UTC }; +/// The maximum possible `Date`. +pub const MAX: Date = Date { date: naive::date::MAX, offset: UTC }; + +impl Date { + /// Makes a new `Date` with given *UTC* date and offset. + /// The local date should be constructed via the `TimeZone` trait. + // + // note: this constructor is purposedly not named to `new` to discourage the direct usage. + #[inline] + pub fn from_utc(date: NaiveDate, offset: Tz::Offset) -> Date { + Date { date: date, offset: offset } + } + + /// Makes a new `DateTime` from the current date and given `NaiveTime`. + /// The offset in the current date is preserved. 
+ /// + /// Panics on invalid datetime. + #[inline] + pub fn and_time(&self, time: NaiveTime) -> Option> { + let localdt = self.naive_local().and_time(time); + self.timezone().from_local_datetime(&localdt).single() + } + + /// Makes a new `DateTime` from the current date, hour, minute and second. + /// The offset in the current date is preserved. + /// + /// Panics on invalid hour, minute and/or second. + #[inline] + pub fn and_hms(&self, hour: u32, min: u32, sec: u32) -> DateTime { + self.and_hms_opt(hour, min, sec).expect("invalid time") + } + + /// Makes a new `DateTime` from the current date, hour, minute and second. + /// The offset in the current date is preserved. + /// + /// Returns `None` on invalid hour, minute and/or second. + #[inline] + pub fn and_hms_opt(&self, hour: u32, min: u32, sec: u32) -> Option> { + NaiveTime::from_hms_opt(hour, min, sec).and_then(|time| self.and_time(time)) + } + + /// Makes a new `DateTime` from the current date, hour, minute, second and millisecond. + /// The millisecond part can exceed 1,000 in order to represent the leap second. + /// The offset in the current date is preserved. + /// + /// Panics on invalid hour, minute, second and/or millisecond. + #[inline] + pub fn and_hms_milli(&self, hour: u32, min: u32, sec: u32, milli: u32) -> DateTime { + self.and_hms_milli_opt(hour, min, sec, milli).expect("invalid time") + } + + /// Makes a new `DateTime` from the current date, hour, minute, second and millisecond. + /// The millisecond part can exceed 1,000 in order to represent the leap second. + /// The offset in the current date is preserved. + /// + /// Returns `None` on invalid hour, minute, second and/or millisecond. + #[inline] + pub fn and_hms_milli_opt(&self, hour: u32, min: u32, sec: u32, + milli: u32) -> Option> { + NaiveTime::from_hms_milli_opt(hour, min, sec, milli).and_then(|time| self.and_time(time)) + } + + /// Makes a new `DateTime` from the current date, hour, minute, second and microsecond. 
+ /// The microsecond part can exceed 1,000,000 in order to represent the leap second. + /// The offset in the current date is preserved. + /// + /// Panics on invalid hour, minute, second and/or microsecond. + #[inline] + pub fn and_hms_micro(&self, hour: u32, min: u32, sec: u32, micro: u32) -> DateTime { + self.and_hms_micro_opt(hour, min, sec, micro).expect("invalid time") + } + + /// Makes a new `DateTime` from the current date, hour, minute, second and microsecond. + /// The microsecond part can exceed 1,000,000 in order to represent the leap second. + /// The offset in the current date is preserved. + /// + /// Returns `None` on invalid hour, minute, second and/or microsecond. + #[inline] + pub fn and_hms_micro_opt(&self, hour: u32, min: u32, sec: u32, + micro: u32) -> Option> { + NaiveTime::from_hms_micro_opt(hour, min, sec, micro).and_then(|time| self.and_time(time)) + } + + /// Makes a new `DateTime` from the current date, hour, minute, second and nanosecond. + /// The nanosecond part can exceed 1,000,000,000 in order to represent the leap second. + /// The offset in the current date is preserved. + /// + /// Panics on invalid hour, minute, second and/or nanosecond. + #[inline] + pub fn and_hms_nano(&self, hour: u32, min: u32, sec: u32, nano: u32) -> DateTime { + self.and_hms_nano_opt(hour, min, sec, nano).expect("invalid time") + } + + /// Makes a new `DateTime` from the current date, hour, minute, second and nanosecond. + /// The nanosecond part can exceed 1,000,000,000 in order to represent the leap second. + /// The offset in the current date is preserved. + /// + /// Returns `None` on invalid hour, minute, second and/or nanosecond. + #[inline] + pub fn and_hms_nano_opt(&self, hour: u32, min: u32, sec: u32, + nano: u32) -> Option> { + NaiveTime::from_hms_nano_opt(hour, min, sec, nano).and_then(|time| self.and_time(time)) + } + + /// Makes a new `Date` for the next date. + /// + /// Panics when `self` is the last representable date. 
+ #[inline] + pub fn succ(&self) -> Date { + self.succ_opt().expect("out of bound") + } + + /// Makes a new `Date` for the next date. + /// + /// Returns `None` when `self` is the last representable date. + #[inline] + pub fn succ_opt(&self) -> Option> { + self.date.succ_opt().map(|date| Date::from_utc(date, self.offset.clone())) + } + + /// Makes a new `Date` for the prior date. + /// + /// Panics when `self` is the first representable date. + #[inline] + pub fn pred(&self) -> Date { + self.pred_opt().expect("out of bound") + } + + /// Makes a new `Date` for the prior date. + /// + /// Returns `None` when `self` is the first representable date. + #[inline] + pub fn pred_opt(&self) -> Option> { + self.date.pred_opt().map(|date| Date::from_utc(date, self.offset.clone())) + } + + /// Retrieves an associated offset from UTC. + #[inline] + pub fn offset<'a>(&'a self) -> &'a Tz::Offset { + &self.offset + } + + /// Retrieves an associated time zone. + #[inline] + pub fn timezone(&self) -> Tz { + TimeZone::from_offset(&self.offset) + } + + /// Changes the associated time zone. + /// This does not change the actual `Date` (but will change the string representation). + #[inline] + pub fn with_timezone(&self, tz: &Tz2) -> Date { + tz.from_utc_date(&self.date) + } + + /// Adds given `Duration` to the current date. + /// + /// Returns `None` when it will result in overflow. + #[inline] + pub fn checked_add(self, rhs: Duration) -> Option> { + let date = try_opt!(self.date.checked_add(rhs)); + Some(Date { date: date, offset: self.offset }) + } + + /// Subtracts given `Duration` from the current date. + /// + /// Returns `None` when it will result in overflow. + #[inline] + pub fn checked_sub(self, rhs: Duration) -> Option> { + let date = try_opt!(self.date.checked_sub(rhs)); + Some(Date { date: date, offset: self.offset }) + } + + /// Returns a view to the naive UTC date. 
+ #[inline] + pub fn naive_utc(&self) -> NaiveDate { + self.date + } + + /// Returns a view to the naive local date. + #[inline] + pub fn naive_local(&self) -> NaiveDate { + self.date + self.offset.local_minus_utc() + } +} + +/// Maps the local date to other date with given conversion function. +fn map_local(d: &Date, mut f: F) -> Option> + where F: FnMut(NaiveDate) -> Option { + f(d.naive_local()).and_then(|date| d.timezone().from_local_date(&date).single()) +} + +impl Date where Tz::Offset: fmt::Display { + /// Formats the date with the specified formatting items. + #[inline] + pub fn format_with_items<'a, I>(&self, items: I) -> DelayedFormat + where I: Iterator> + Clone { + DelayedFormat::new_with_offset(Some(self.naive_local()), None, &self.offset, items) + } + + /// Formats the date with the specified format string. + /// See the [`format::strftime` module](../format/strftime/index.html) + /// on the supported escape sequences. + #[inline] + pub fn format<'a>(&self, fmt: &'a str) -> DelayedFormat> { + self.format_with_items(StrftimeItems::new(fmt)) + } +} + +impl Datelike for Date { + #[inline] fn year(&self) -> i32 { self.naive_local().year() } + #[inline] fn month(&self) -> u32 { self.naive_local().month() } + #[inline] fn month0(&self) -> u32 { self.naive_local().month0() } + #[inline] fn day(&self) -> u32 { self.naive_local().day() } + #[inline] fn day0(&self) -> u32 { self.naive_local().day0() } + #[inline] fn ordinal(&self) -> u32 { self.naive_local().ordinal() } + #[inline] fn ordinal0(&self) -> u32 { self.naive_local().ordinal0() } + #[inline] fn weekday(&self) -> Weekday { self.naive_local().weekday() } + #[inline] fn isoweekdate(&self) -> (i32, u32, Weekday) { self.naive_local().isoweekdate() } + + #[inline] + fn with_year(&self, year: i32) -> Option> { + map_local(self, |date| date.with_year(year)) + } + + #[inline] + fn with_month(&self, month: u32) -> Option> { + map_local(self, |date| date.with_month(month)) + } + + #[inline] + fn 
with_month0(&self, month0: u32) -> Option> { + map_local(self, |date| date.with_month0(month0)) + } + + #[inline] + fn with_day(&self, day: u32) -> Option> { + map_local(self, |date| date.with_day(day)) + } + + #[inline] + fn with_day0(&self, day0: u32) -> Option> { + map_local(self, |date| date.with_day0(day0)) + } + + #[inline] + fn with_ordinal(&self, ordinal: u32) -> Option> { + map_local(self, |date| date.with_ordinal(ordinal)) + } + + #[inline] + fn with_ordinal0(&self, ordinal0: u32) -> Option> { + map_local(self, |date| date.with_ordinal0(ordinal0)) + } +} + +// we need them as automatic impls cannot handle associated types +impl Copy for Date where ::Offset: Copy {} +unsafe impl Send for Date where ::Offset: Send {} + +impl PartialEq> for Date { + fn eq(&self, other: &Date) -> bool { self.date == other.date } +} + +impl Eq for Date { +} + +impl PartialOrd for Date { + fn partial_cmp(&self, other: &Date) -> Option { + self.date.partial_cmp(&other.date) + } +} + +impl Ord for Date { + fn cmp(&self, other: &Date) -> Ordering { self.date.cmp(&other.date) } +} + +impl hash::Hash for Date { + fn hash(&self, state: &mut H) { self.date.hash(state) } +} + +impl Add for Date { + type Output = Date; + + #[inline] + fn add(self, rhs: Duration) -> Date { + self.checked_add(rhs).expect("`Date + Duration` overflowed") + } +} + +impl Sub> for Date { + type Output = Duration; + + #[inline] + fn sub(self, rhs: Date) -> Duration { self.date - rhs.date } +} + +impl Sub for Date { + type Output = Date; + + #[inline] + fn sub(self, rhs: Duration) -> Date { + self.checked_sub(rhs).expect("`Date - Duration` overflowed") + } +} + +impl fmt::Debug for Date { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{:?}{:?}", self.naive_local(), self.offset) + } +} + +impl fmt::Display for Date where Tz::Offset: fmt::Display { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}{}", self.naive_local(), self.offset) + } +} + +#[cfg(feature = 
"rustc-serialize")] +mod rustc_serialize { + use super::Date; + use offset::TimeZone; + use rustc_serialize::{Encodable, Encoder, Decodable, Decoder}; + + // TODO the current serialization format is NEVER intentionally defined. + // in the future it is likely to be redefined to more sane and reasonable format. + + impl Encodable for Date where Tz::Offset: Encodable { + fn encode(&self, s: &mut S) -> Result<(), S::Error> { + s.emit_struct("Date", 2, |s| { + try!(s.emit_struct_field("date", 0, |s| self.date.encode(s))); + try!(s.emit_struct_field("offset", 1, |s| self.offset.encode(s))); + Ok(()) + }) + } + } + + impl Decodable for Date where Tz::Offset: Decodable { + fn decode(d: &mut D) -> Result, D::Error> { + d.read_struct("Date", 2, |d| { + let date = try!(d.read_struct_field("date", 0, Decodable::decode)); + let offset = try!(d.read_struct_field("offset", 1, Decodable::decode)); + Ok(Date::from_utc(date, offset)) + }) + } + } + + #[test] + fn test_encodable() { + use offset::utc::UTC; + use rustc_serialize::json::encode; + + assert_eq!(encode(&UTC.ymd(2014, 7, 24)).ok(), + Some(r#"{"date":{"ymdf":16501977},"offset":{}}"#.into())); + } + + #[test] + fn test_decodable() { + use offset::utc::UTC; + use rustc_serialize::json; + + let decode = |s: &str| json::decode::>(s); + + assert_eq!(decode(r#"{"date":{"ymdf":16501977},"offset":{}}"#).ok(), + Some(UTC.ymd(2014, 7, 24))); + + assert!(decode(r#"{"date":{"ymdf":0},"offset":{}}"#).is_err()); + } +} + +#[cfg(test)] +mod tests { + use std::fmt; + + use Datelike; + use duration::Duration; + use naive::date::NaiveDate; + use naive::datetime::NaiveDateTime; + use offset::{TimeZone, Offset, LocalResult}; + use offset::local::Local; + + #[derive(Copy, Clone, PartialEq, Eq)] + struct UTC1y; // same to UTC but with an offset of 365 days + + #[derive(Copy, Clone, PartialEq, Eq)] + struct OneYear; + + impl TimeZone for UTC1y { + type Offset = OneYear; + + fn from_offset(_offset: &OneYear) -> UTC1y { UTC1y } + + fn 
offset_from_local_date(&self, _local: &NaiveDate) -> LocalResult { + LocalResult::Single(OneYear) + } + fn offset_from_local_datetime(&self, _local: &NaiveDateTime) -> LocalResult { + LocalResult::Single(OneYear) + } + + fn offset_from_utc_date(&self, _utc: &NaiveDate) -> OneYear { OneYear } + fn offset_from_utc_datetime(&self, _utc: &NaiveDateTime) -> OneYear { OneYear } + } + + impl Offset for OneYear { + fn local_minus_utc(&self) -> Duration { Duration::days(365) } + } + + impl fmt::Debug for OneYear { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "+8760:00") } + } + + #[test] + fn test_date_weird_offset() { + assert_eq!(format!("{:?}", UTC1y.ymd(2012, 2, 29)), + "2012-02-29+8760:00".to_string()); + assert_eq!(format!("{:?}", UTC1y.ymd(2012, 2, 29).and_hms(5, 6, 7)), + "2012-02-29T05:06:07+8760:00".to_string()); + assert_eq!(format!("{:?}", UTC1y.ymd(2012, 3, 4)), + "2012-03-04+8760:00".to_string()); + assert_eq!(format!("{:?}", UTC1y.ymd(2012, 3, 4).and_hms(5, 6, 7)), + "2012-03-04T05:06:07+8760:00".to_string()); + } + + #[test] + fn test_local_date_sanity_check() { // issue #27 + assert_eq!(Local.ymd(2999, 12, 28).day(), 28); + } +} + diff -Nru cargo-0.17.0/vendor/chrono-0.2.25/src/datetime.rs cargo-0.19.0/vendor/chrono-0.2.25/src/datetime.rs --- cargo-0.17.0/vendor/chrono-0.2.25/src/datetime.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/chrono-0.2.25/src/datetime.rs 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,723 @@ +// This is a part of rust-chrono. +// Copyright (c) 2014-2015, Kang Seonghoon. +// See README.md and LICENSE.txt for details. + +/*! + * ISO 8601 date and time with time zone. 
+ */ + +use std::{str, fmt, hash}; +use std::cmp::Ordering; +use std::ops::{Add, Sub}; + +use {Weekday, Timelike, Datelike}; +use offset::{TimeZone, Offset}; +use offset::utc::UTC; +use offset::local::Local; +use offset::fixed::FixedOffset; +use duration::Duration; +use naive::time::NaiveTime; +use naive::datetime::NaiveDateTime; +use date::Date; +use format::{Item, Numeric, Pad, Fixed}; +use format::{parse, Parsed, ParseError, ParseResult, DelayedFormat, StrftimeItems}; + +/// ISO 8601 combined date and time with time zone. +#[derive(Clone)] +pub struct DateTime { + datetime: NaiveDateTime, + offset: Tz::Offset, +} + +impl DateTime { + /// Makes a new `DateTime` with given *UTC* datetime and offset. + /// The local datetime should be constructed via the `TimeZone` trait. + // + // note: this constructor is purposedly not named to `new` to discourage the direct usage. + #[inline] + pub fn from_utc(datetime: NaiveDateTime, offset: Tz::Offset) -> DateTime { + DateTime { datetime: datetime, offset: offset } + } + + /// Retrieves a date component. + #[inline] + pub fn date(&self) -> Date { + Date::from_utc(self.naive_local().date(), self.offset.clone()) + } + + /// Retrieves a time component. + /// Unlike `date`, this is not associated to the time zone. + #[inline] + pub fn time(&self) -> NaiveTime { + self.datetime.time() + self.offset.local_minus_utc() + } + + /// Returns the number of non-leap seconds since January 1, 1970 0:00:00 UTC + /// (aka "UNIX timestamp"). 
+ #[inline] + pub fn timestamp(&self) -> i64 { + self.datetime.timestamp() + } + + /// Returns the number of milliseconds since the last second boundary + /// + /// warning: in event of a leap second, this may exceed 999 + /// + /// note: this is not the number of milliseconds since January 1, 1970 0:00:00 UTC + #[inline] + pub fn timestamp_subsec_millis(&self) -> u32 { + self.datetime.timestamp_subsec_millis() + } + + /// Returns the number of microseconds since the last second boundary + /// + /// warning: in event of a leap second, this may exceed 999_999 + /// + /// note: this is not the number of microseconds since January 1, 1970 0:00:00 UTC + #[inline] + pub fn timestamp_subsec_micros(&self) -> u32 { + self.datetime.timestamp_subsec_micros() + } + + /// Returns the number of nanoseconds since the last second boundary + /// + /// warning: in event of a leap second, this may exceed 999_999_999 + /// + /// note: this is not the number of nanoseconds since January 1, 1970 0:00:00 UTC + #[inline] + pub fn timestamp_subsec_nanos(&self) -> u32 { + self.datetime.timestamp_subsec_nanos() + } + + /// *Deprecated*: Same to `DateTime::timestamp`. + #[inline] + pub fn num_seconds_from_unix_epoch(&self) -> i64 { + self.timestamp() + } + + /// Retrieves an associated offset from UTC. + #[inline] + pub fn offset<'a>(&'a self) -> &'a Tz::Offset { + &self.offset + } + + /// Retrieves an associated time zone. + #[inline] + pub fn timezone(&self) -> Tz { + TimeZone::from_offset(&self.offset) + } + + /// Changes the associated time zone. + /// This does not change the actual `DateTime` (but will change the string representation). + #[inline] + pub fn with_timezone(&self, tz: &Tz2) -> DateTime { + tz.from_utc_datetime(&self.datetime) + } + + /// Adds given `Duration` to the current date and time. + /// + /// Returns `None` when it will result in overflow. 
+ #[inline] + pub fn checked_add(self, rhs: Duration) -> Option> { + let datetime = try_opt!(self.datetime.checked_add(rhs)); + Some(DateTime { datetime: datetime, offset: self.offset }) + } + + /// Subtracts given `Duration` from the current date and time. + /// + /// Returns `None` when it will result in overflow. + #[inline] + pub fn checked_sub(self, rhs: Duration) -> Option> { + let datetime = try_opt!(self.datetime.checked_sub(rhs)); + Some(DateTime { datetime: datetime, offset: self.offset }) + } + + /// Returns a view to the naive UTC datetime. + #[inline] + pub fn naive_utc(&self) -> NaiveDateTime { + self.datetime + } + + /// Returns a view to the naive local datetime. + #[inline] + pub fn naive_local(&self) -> NaiveDateTime { + self.datetime + self.offset.local_minus_utc() + } +} + +/// Maps the local datetime to other datetime with given conversion function. +fn map_local(dt: &DateTime, mut f: F) -> Option> + where F: FnMut(NaiveDateTime) -> Option { + f(dt.naive_local()).and_then(|datetime| dt.timezone().from_local_datetime(&datetime).single()) +} + +impl DateTime { + /// Parses an RFC 2822 date and time string such as `Tue, 1 Jul 2003 10:52:37 +0200`, + /// then returns a new `DateTime` with a parsed `FixedOffset`. + pub fn parse_from_rfc2822(s: &str) -> ParseResult> { + const ITEMS: &'static [Item<'static>] = &[Item::Fixed(Fixed::RFC2822)]; + let mut parsed = Parsed::new(); + try!(parse(&mut parsed, s, ITEMS.iter().cloned())); + parsed.to_datetime() + } + + /// Parses an RFC 3339 and ISO 8601 date and time string such as `1996-12-19T16:39:57-08:00`, + /// then returns a new `DateTime` with a parsed `FixedOffset`. + /// + /// Why isn't this named `parse_from_iso8601`? That's because ISO 8601 allows some freedom + /// over the syntax and RFC 3339 exercises that freedom to rigidly define a fixed format. 
+ pub fn parse_from_rfc3339(s: &str) -> ParseResult> { + const ITEMS: &'static [Item<'static>] = &[Item::Fixed(Fixed::RFC3339)]; + let mut parsed = Parsed::new(); + try!(parse(&mut parsed, s, ITEMS.iter().cloned())); + parsed.to_datetime() + } + + /// Parses a string with the specified format string and + /// returns a new `DateTime` with a parsed `FixedOffset`. + /// See the [`format::strftime` module](../format/strftime/index.html) + /// on the supported escape sequences. + /// + /// See also `Offset::datetime_from_str` which gives a local `DateTime` on specific time zone. + pub fn parse_from_str(s: &str, fmt: &str) -> ParseResult> { + let mut parsed = Parsed::new(); + try!(parse(&mut parsed, s, StrftimeItems::new(fmt))); + parsed.to_datetime() + } +} + +impl DateTime where Tz::Offset: fmt::Display { + /// Returns an RFC 2822 date and time string such as `Tue, 1 Jul 2003 10:52:37 +0200`. + pub fn to_rfc2822(&self) -> String { + const ITEMS: &'static [Item<'static>] = &[Item::Fixed(Fixed::RFC2822)]; + self.format_with_items(ITEMS.iter().cloned()).to_string() + } + + /// Returns an RFC 3339 and ISO 8601 date and time string such as `1996-12-19T16:39:57-08:00`. + pub fn to_rfc3339(&self) -> String { + const ITEMS: &'static [Item<'static>] = &[Item::Fixed(Fixed::RFC3339)]; + self.format_with_items(ITEMS.iter().cloned()).to_string() + } + + /// Formats the combined date and time with the specified formatting items. + #[inline] + pub fn format_with_items<'a, I>(&self, items: I) -> DelayedFormat + where I: Iterator> + Clone { + let local = self.naive_local(); + DelayedFormat::new_with_offset(Some(local.date()), Some(local.time()), &self.offset, items) + } + + /// Formats the combined date and time with the specified format string. + /// See the [`format::strftime` module](../format/strftime/index.html) + /// on the supported escape sequences. 
+ #[inline] + pub fn format<'a>(&self, fmt: &'a str) -> DelayedFormat> { + self.format_with_items(StrftimeItems::new(fmt)) + } +} + +impl Datelike for DateTime { + #[inline] fn year(&self) -> i32 { self.naive_local().year() } + #[inline] fn month(&self) -> u32 { self.naive_local().month() } + #[inline] fn month0(&self) -> u32 { self.naive_local().month0() } + #[inline] fn day(&self) -> u32 { self.naive_local().day() } + #[inline] fn day0(&self) -> u32 { self.naive_local().day0() } + #[inline] fn ordinal(&self) -> u32 { self.naive_local().ordinal() } + #[inline] fn ordinal0(&self) -> u32 { self.naive_local().ordinal0() } + #[inline] fn weekday(&self) -> Weekday { self.naive_local().weekday() } + #[inline] fn isoweekdate(&self) -> (i32, u32, Weekday) { self.naive_local().isoweekdate() } + + #[inline] + fn with_year(&self, year: i32) -> Option> { + map_local(self, |datetime| datetime.with_year(year)) + } + + #[inline] + fn with_month(&self, month: u32) -> Option> { + map_local(self, |datetime| datetime.with_month(month)) + } + + #[inline] + fn with_month0(&self, month0: u32) -> Option> { + map_local(self, |datetime| datetime.with_month0(month0)) + } + + #[inline] + fn with_day(&self, day: u32) -> Option> { + map_local(self, |datetime| datetime.with_day(day)) + } + + #[inline] + fn with_day0(&self, day0: u32) -> Option> { + map_local(self, |datetime| datetime.with_day0(day0)) + } + + #[inline] + fn with_ordinal(&self, ordinal: u32) -> Option> { + map_local(self, |datetime| datetime.with_ordinal(ordinal)) + } + + #[inline] + fn with_ordinal0(&self, ordinal0: u32) -> Option> { + map_local(self, |datetime| datetime.with_ordinal0(ordinal0)) + } +} + +impl Timelike for DateTime { + #[inline] fn hour(&self) -> u32 { self.naive_local().hour() } + #[inline] fn minute(&self) -> u32 { self.naive_local().minute() } + #[inline] fn second(&self) -> u32 { self.naive_local().second() } + #[inline] fn nanosecond(&self) -> u32 { self.naive_local().nanosecond() } + + #[inline] + fn 
with_hour(&self, hour: u32) -> Option> { + map_local(self, |datetime| datetime.with_hour(hour)) + } + + #[inline] + fn with_minute(&self, min: u32) -> Option> { + map_local(self, |datetime| datetime.with_minute(min)) + } + + #[inline] + fn with_second(&self, sec: u32) -> Option> { + map_local(self, |datetime| datetime.with_second(sec)) + } + + #[inline] + fn with_nanosecond(&self, nano: u32) -> Option> { + map_local(self, |datetime| datetime.with_nanosecond(nano)) + } +} + +// we need them as automatic impls cannot handle associated types +impl Copy for DateTime where ::Offset: Copy {} +unsafe impl Send for DateTime where ::Offset: Send {} + +impl PartialEq> for DateTime { + fn eq(&self, other: &DateTime) -> bool { self.datetime == other.datetime } +} + +impl Eq for DateTime { +} + +impl PartialOrd for DateTime { + fn partial_cmp(&self, other: &DateTime) -> Option { + self.datetime.partial_cmp(&other.datetime) + } +} + +impl Ord for DateTime { + fn cmp(&self, other: &DateTime) -> Ordering { self.datetime.cmp(&other.datetime) } +} + +impl hash::Hash for DateTime { + fn hash(&self, state: &mut H) { self.datetime.hash(state) } +} + +impl Add for DateTime { + type Output = DateTime; + + #[inline] + fn add(self, rhs: Duration) -> DateTime { + self.checked_add(rhs).expect("`DateTime + Duration` overflowed") + } +} + +impl Sub> for DateTime { + type Output = Duration; + + #[inline] + fn sub(self, rhs: DateTime) -> Duration { self.datetime - rhs.datetime } +} + +impl Sub for DateTime { + type Output = DateTime; + + #[inline] + fn sub(self, rhs: Duration) -> DateTime { + self.checked_sub(rhs).expect("`DateTime - Duration` overflowed") + } +} + +impl fmt::Debug for DateTime { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{:?}{:?}", self.naive_local(), self.offset) + } +} + +impl fmt::Display for DateTime where Tz::Offset: fmt::Display { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{} {}", self.naive_local(), self.offset) + } 
+} + +impl str::FromStr for DateTime { + type Err = ParseError; + + fn from_str(s: &str) -> ParseResult> { + const ITEMS: &'static [Item<'static>] = &[ + Item::Space(""), Item::Numeric(Numeric::Year, Pad::Zero), + Item::Space(""), Item::Literal("-"), + Item::Space(""), Item::Numeric(Numeric::Month, Pad::Zero), + Item::Space(""), Item::Literal("-"), + Item::Space(""), Item::Numeric(Numeric::Day, Pad::Zero), + Item::Space(""), Item::Literal("T"), // XXX shouldn't this be case-insensitive? + Item::Space(""), Item::Numeric(Numeric::Hour, Pad::Zero), + Item::Space(""), Item::Literal(":"), + Item::Space(""), Item::Numeric(Numeric::Minute, Pad::Zero), + Item::Space(""), Item::Literal(":"), + Item::Space(""), Item::Numeric(Numeric::Second, Pad::Zero), + Item::Fixed(Fixed::Nanosecond), + Item::Space(""), Item::Fixed(Fixed::TimezoneOffsetZ), + Item::Space(""), + ]; + + let mut parsed = Parsed::new(); + try!(parse(&mut parsed, s, ITEMS.iter().cloned())); + parsed.to_datetime() + } +} + +impl str::FromStr for DateTime { + type Err = ParseError; + + fn from_str(s: &str) -> ParseResult> { + s.parse::>().map(|dt| dt.with_timezone(&UTC)) + } +} + +impl str::FromStr for DateTime { + type Err = ParseError; + + fn from_str(s: &str) -> ParseResult> { + s.parse::>().map(|dt| dt.with_timezone(&Local)) + } +} + +#[cfg(feature = "rustc-serialize")] +mod rustc_serialize { + use super::DateTime; + use offset::TimeZone; + use rustc_serialize::{Encodable, Encoder, Decodable, Decoder}; + + // TODO the current serialization format is NEVER intentionally defined. + // in the future it is likely to be redefined to more sane and reasonable format. 
+ + impl Encodable for DateTime where Tz::Offset: Encodable { + fn encode(&self, s: &mut S) -> Result<(), S::Error> { + s.emit_struct("DateTime", 2, |s| { + try!(s.emit_struct_field("datetime", 0, |s| self.datetime.encode(s))); + try!(s.emit_struct_field("offset", 1, |s| self.offset.encode(s))); + Ok(()) + }) + } + } + + impl Decodable for DateTime where Tz::Offset: Decodable { + fn decode(d: &mut D) -> Result, D::Error> { + d.read_struct("DateTime", 2, |d| { + let datetime = try!(d.read_struct_field("datetime", 0, Decodable::decode)); + let offset = try!(d.read_struct_field("offset", 1, Decodable::decode)); + Ok(DateTime::from_utc(datetime, offset)) + }) + } + } + + #[test] + fn test_encodable() { + use offset::utc::UTC; + use rustc_serialize::json::encode; + + assert_eq!( + encode(&UTC.ymd(2014, 7, 24).and_hms(12, 34, 6)).ok(), + Some(concat!(r#"{"datetime":{"date":{"ymdf":16501977},"#, + r#""time":{"secs":45246,"frac":0}},"#, + r#""offset":{}}"#).into())); + } + + #[test] + fn test_decodable() { + use offset::utc::UTC; + use rustc_serialize::json; + + let decode = |s: &str| json::decode::>(s); + + assert_eq!( + decode(r#"{"datetime":{"date":{"ymdf":16501977}, + "time":{"secs":45246,"frac":0}}, + "offset":{}}"#).ok(), + Some(UTC.ymd(2014, 7, 24).and_hms(12, 34, 6))); + + assert_eq!( + decode(r#"{"datetime":{"date":{"ymdf":0}, + "time":{"secs":0,"frac":0}}, + "offset":{}}"#).ok(), + None); + } +} + +#[cfg(feature = "serde")] +mod serde { + use super::DateTime; + use offset::TimeZone; + use offset::utc::UTC; + use offset::local::Local; + use offset::fixed::FixedOffset; + use std::fmt::Display; + use serde::{ser, de}; + + // TODO not very optimized for space (binary formats would want something better) + + impl ser::Serialize for DateTime + where Tz::Offset: Display + { + fn serialize(&self, serializer: &mut S) -> Result<(), S::Error> + where S: ser::Serializer + { + // Debug formatting is correct RFC3339, and it allows Zulu. 
+ serializer.serialize_str(&format!("{:?}", self)) + } + } + + struct DateTimeVisitor; + + impl de::Visitor for DateTimeVisitor { + type Value = DateTime; + + fn visit_str(&mut self, value: &str) -> Result, E> + where E: de::Error + { + value.parse().map_err(|err| E::custom(format!("{}", err))) + } + } + + impl de::Deserialize for DateTime { + fn deserialize(deserializer: &mut D) -> Result + where D: de::Deserializer + { + deserializer.deserialize(DateTimeVisitor) + } + } + + impl de::Deserialize for DateTime { + fn deserialize(deserializer: &mut D) -> Result + where D: de::Deserializer + { + deserializer.deserialize(DateTimeVisitor).map(|dt| dt.with_timezone(&UTC)) + } + } + + impl de::Deserialize for DateTime { + fn deserialize(deserializer: &mut D) -> Result + where D: de::Deserializer + { + deserializer.deserialize(DateTimeVisitor).map(|dt| dt.with_timezone(&Local)) + } + } + + #[cfg(test)] extern crate serde_json; + + #[test] + fn test_serde_serialize() { + use self::serde_json::to_string; + + assert_eq!(to_string(&UTC.ymd(2014, 7, 24).and_hms(12, 34, 6)).ok(), + Some(r#""2014-07-24T12:34:06Z""#.into())); + } + + #[test] + fn test_serde_deserialize() { + use self::serde_json; + + let from_str = |s: &str| serde_json::from_str::>(s); + + assert_eq!(from_str(r#""2014-07-24T12:34:06Z""#).ok(), + Some(UTC.ymd(2014, 7, 24).and_hms(12, 34, 6))); + + assert!(from_str(r#""2014-07-32T12:34:06Z""#).is_err()); + } +} + +#[cfg(test)] +mod tests { + use super::DateTime; + use Datelike; + use naive::time::NaiveTime; + use naive::date::NaiveDate; + use duration::Duration; + use offset::TimeZone; + use offset::utc::UTC; + use offset::local::Local; + use offset::fixed::FixedOffset; + + #[test] + #[allow(non_snake_case)] + fn test_datetime_offset() { + let EST = FixedOffset::west(5*60*60); + let EDT = FixedOffset::west(4*60*60); + let KST = FixedOffset::east(9*60*60); + + assert_eq!(format!("{}", UTC.ymd(2014, 5, 6).and_hms(7, 8, 9)), + "2014-05-06 07:08:09 UTC"); + 
assert_eq!(format!("{}", EDT.ymd(2014, 5, 6).and_hms(7, 8, 9)), + "2014-05-06 07:08:09 -04:00"); + assert_eq!(format!("{}", KST.ymd(2014, 5, 6).and_hms(7, 8, 9)), + "2014-05-06 07:08:09 +09:00"); + assert_eq!(format!("{:?}", UTC.ymd(2014, 5, 6).and_hms(7, 8, 9)), + "2014-05-06T07:08:09Z"); + assert_eq!(format!("{:?}", EDT.ymd(2014, 5, 6).and_hms(7, 8, 9)), + "2014-05-06T07:08:09-04:00"); + assert_eq!(format!("{:?}", KST.ymd(2014, 5, 6).and_hms(7, 8, 9)), + "2014-05-06T07:08:09+09:00"); + + // edge cases + assert_eq!(format!("{:?}", UTC.ymd(2014, 5, 6).and_hms(0, 0, 0)), + "2014-05-06T00:00:00Z"); + assert_eq!(format!("{:?}", EDT.ymd(2014, 5, 6).and_hms(0, 0, 0)), + "2014-05-06T00:00:00-04:00"); + assert_eq!(format!("{:?}", KST.ymd(2014, 5, 6).and_hms(0, 0, 0)), + "2014-05-06T00:00:00+09:00"); + assert_eq!(format!("{:?}", UTC.ymd(2014, 5, 6).and_hms(23, 59, 59)), + "2014-05-06T23:59:59Z"); + assert_eq!(format!("{:?}", EDT.ymd(2014, 5, 6).and_hms(23, 59, 59)), + "2014-05-06T23:59:59-04:00"); + assert_eq!(format!("{:?}", KST.ymd(2014, 5, 6).and_hms(23, 59, 59)), + "2014-05-06T23:59:59+09:00"); + + assert_eq!(UTC.ymd(2014, 5, 6).and_hms(7, 8, 9), EDT.ymd(2014, 5, 6).and_hms(3, 8, 9)); + assert_eq!(UTC.ymd(2014, 5, 6).and_hms(7, 8, 9) + Duration::seconds(3600 + 60 + 1), + UTC.ymd(2014, 5, 6).and_hms(8, 9, 10)); + assert_eq!(UTC.ymd(2014, 5, 6).and_hms(7, 8, 9) - EDT.ymd(2014, 5, 6).and_hms(10, 11, 12), + Duration::seconds(-7*3600 - 3*60 - 3)); + + assert_eq!(*UTC.ymd(2014, 5, 6).and_hms(7, 8, 9).offset(), UTC); + assert_eq!(*EDT.ymd(2014, 5, 6).and_hms(7, 8, 9).offset(), EDT); + assert!(*EDT.ymd(2014, 5, 6).and_hms(7, 8, 9).offset() != EST); + } + + #[test] + fn test_datetime_date_and_time() { + let tz = FixedOffset::east(5*60*60); + let d = tz.ymd(2014, 5, 6).and_hms(7, 8, 9); + assert_eq!(d.time(), NaiveTime::from_hms(7, 8, 9)); + assert_eq!(d.date(), tz.ymd(2014, 5, 6)); + assert_eq!(d.date().naive_local(), NaiveDate::from_ymd(2014, 5, 6)); + 
assert_eq!(d.date().and_time(d.time()), Some(d)); + + let tz = FixedOffset::east(4*60*60); + let d = tz.ymd(2016, 5, 4).and_hms(3, 2, 1); + assert_eq!(d.time(), NaiveTime::from_hms(3, 2, 1)); + assert_eq!(d.date(), tz.ymd(2016, 5, 4)); + assert_eq!(d.date().naive_local(), NaiveDate::from_ymd(2016, 5, 4)); + assert_eq!(d.date().and_time(d.time()), Some(d)); + + let tz = FixedOffset::west(13*60*60); + let d = tz.ymd(2017, 8, 9).and_hms(12, 34, 56); + assert_eq!(d.time(), NaiveTime::from_hms(12, 34, 56)); + assert_eq!(d.date(), tz.ymd(2017, 8, 9)); + assert_eq!(d.date().naive_local(), NaiveDate::from_ymd(2017, 8, 9)); + assert_eq!(d.date().and_time(d.time()), Some(d)); + } + + #[test] + fn test_datetime_with_timezone() { + let local_now = Local::now(); + let utc_now = local_now.with_timezone(&UTC); + let local_now2 = utc_now.with_timezone(&Local); + assert_eq!(local_now, local_now2); + } + + #[test] + #[allow(non_snake_case)] + fn test_datetime_rfc2822_and_rfc3339() { + let EDT = FixedOffset::east(5*60*60); + assert_eq!(UTC.ymd(2015, 2, 18).and_hms(23, 16, 9).to_rfc2822(), + "Wed, 18 Feb 2015 23:16:09 +0000"); + assert_eq!(UTC.ymd(2015, 2, 18).and_hms(23, 16, 9).to_rfc3339(), + "2015-02-18T23:16:09+00:00"); + assert_eq!(EDT.ymd(2015, 2, 18).and_hms_milli(23, 16, 9, 150).to_rfc2822(), + "Wed, 18 Feb 2015 23:16:09 +0500"); + assert_eq!(EDT.ymd(2015, 2, 18).and_hms_milli(23, 16, 9, 150).to_rfc3339(), + "2015-02-18T23:16:09.150+05:00"); + assert_eq!(EDT.ymd(2015, 2, 18).and_hms_micro(23, 59, 59, 1_234_567).to_rfc2822(), + "Wed, 18 Feb 2015 23:59:60 +0500"); + assert_eq!(EDT.ymd(2015, 2, 18).and_hms_micro(23, 59, 59, 1_234_567).to_rfc3339(), + "2015-02-18T23:59:60.234567+05:00"); + + assert_eq!(DateTime::parse_from_rfc2822("Wed, 18 Feb 2015 23:16:09 +0000"), + Ok(FixedOffset::east(0).ymd(2015, 2, 18).and_hms(23, 16, 9))); + assert_eq!(DateTime::parse_from_rfc3339("2015-02-18T23:16:09Z"), + Ok(FixedOffset::east(0).ymd(2015, 2, 18).and_hms(23, 16, 9))); + 
assert_eq!(DateTime::parse_from_rfc2822("Wed, 18 Feb 2015 23:59:60 +0500"), + Ok(EDT.ymd(2015, 2, 18).and_hms_milli(23, 59, 59, 1_000))); + assert_eq!(DateTime::parse_from_rfc3339("2015-02-18T23:59:60.234567+05:00"), + Ok(EDT.ymd(2015, 2, 18).and_hms_micro(23, 59, 59, 1_234_567))); + } + + #[test] + fn test_datetime_from_str() { + assert_eq!("2015-2-18T23:16:9.15Z".parse::>(), + Ok(FixedOffset::east(0).ymd(2015, 2, 18).and_hms_milli(23, 16, 9, 150))); + assert_eq!("2015-2-18T13:16:9.15-10:00".parse::>(), + Ok(FixedOffset::west(10 * 3600).ymd(2015, 2, 18).and_hms_milli(13, 16, 9, 150))); + assert!("2015-2-18T23:16:9.15".parse::>().is_err()); + + assert_eq!("2015-2-18T23:16:9.15Z".parse::>(), + Ok(UTC.ymd(2015, 2, 18).and_hms_milli(23, 16, 9, 150))); + assert_eq!("2015-2-18T13:16:9.15-10:00".parse::>(), + Ok(UTC.ymd(2015, 2, 18).and_hms_milli(23, 16, 9, 150))); + assert!("2015-2-18T23:16:9.15".parse::>().is_err()); + + // no test for `DateTime`, we cannot verify that much. + } + + #[test] + fn test_datetime_parse_from_str() { + let ymdhms = |y,m,d,h,n,s,off| FixedOffset::east(off).ymd(y,m,d).and_hms(h,n,s); + assert_eq!(DateTime::parse_from_str("2014-5-7T12:34:56+09:30", "%Y-%m-%dT%H:%M:%S%z"), + Ok(ymdhms(2014, 5, 7, 12, 34, 56, 570*60))); // ignore offset + assert!(DateTime::parse_from_str("20140507000000", "%Y%m%d%H%M%S").is_err()); // no offset + assert!(DateTime::parse_from_str("Fri, 09 Aug 2013 23:54:35 GMT", + "%a, %d %b %Y %H:%M:%S GMT").is_err()); + assert_eq!(UTC.datetime_from_str("Fri, 09 Aug 2013 23:54:35 GMT", + "%a, %d %b %Y %H:%M:%S GMT"), + Ok(UTC.ymd(2013, 8, 9).and_hms(23, 54, 35))); + } + + #[test] + fn test_datetime_format_with_local() { + // if we are not around the year boundary, local and UTC date should have the same year + let dt = Local::now().with_month(5).unwrap(); + assert_eq!(dt.format("%Y").to_string(), dt.with_timezone(&UTC).format("%Y").to_string()); + } + + #[test] + fn test_datetime_is_copy() { + // UTC is known to be `Copy`. 
+ let a = UTC::now(); + let b = a; + assert_eq!(a, b); + } + + #[test] + fn test_datetime_is_send() { + use std::thread; + + // UTC is known to be `Send`. + let a = UTC::now(); + thread::spawn(move || { + let _ = a; + }).join().unwrap(); + } + + #[test] + fn test_subsecond_part() { + let datetime = UTC.ymd(2014, 7, 8).and_hms_nano(9, 10, 11, 1234567); + + assert_eq!(1, datetime.timestamp_subsec_millis()); + assert_eq!(1234, datetime.timestamp_subsec_micros()); + assert_eq!(1234567, datetime.timestamp_subsec_nanos()); + } +} + diff -Nru cargo-0.17.0/vendor/chrono-0.2.25/src/div.rs cargo-0.19.0/vendor/chrono-0.2.25/src/div.rs --- cargo-0.17.0/vendor/chrono-0.2.25/src/div.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/chrono-0.2.25/src/div.rs 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,43 @@ +// This is a part of rust-chrono. +// Copyright (c) 2014, Kang Seonghoon. +// Copyright 2013-2014 The Rust Project Developers. +// See README.md and LICENSE.txt for details. + +//! Integer division utilities. (Shamelessly copied from [num](https://github.com/rust-lang/num/)) + +// Algorithm from [Daan Leijen. 
_Division and Modulus for Computer Scientists_, +// December 2001](http://research.microsoft.com/pubs/151917/divmodnote-letter.pdf) + +pub use num::integer::{div_rem, div_floor, mod_floor, div_mod_floor}; + +#[cfg(test)] +mod tests { + use super::{mod_floor, div_mod_floor}; + + #[test] + fn test_mod_floor() { + assert_eq!(mod_floor( 8, 3), 2); + assert_eq!(mod_floor( 8, -3), -1); + assert_eq!(mod_floor(-8, 3), 1); + assert_eq!(mod_floor(-8, -3), -2); + + assert_eq!(mod_floor( 1, 2), 1); + assert_eq!(mod_floor( 1, -2), -1); + assert_eq!(mod_floor(-1, 2), 1); + assert_eq!(mod_floor(-1, -2), -1); + } + + #[test] + fn test_div_mod_floor() { + assert_eq!(div_mod_floor( 8, 3), ( 2, 2)); + assert_eq!(div_mod_floor( 8, -3), (-3, -1)); + assert_eq!(div_mod_floor(-8, 3), (-3, 1)); + assert_eq!(div_mod_floor(-8, -3), ( 2, -2)); + + assert_eq!(div_mod_floor( 1, 2), ( 0, 1)); + assert_eq!(div_mod_floor( 1, -2), (-1, -1)); + assert_eq!(div_mod_floor(-1, 2), (-1, 1)); + assert_eq!(div_mod_floor(-1, -2), ( 0, -1)); + } +} + diff -Nru cargo-0.17.0/vendor/chrono-0.2.25/src/format/mod.rs cargo-0.19.0/vendor/chrono-0.2.25/src/format/mod.rs --- cargo-0.17.0/vendor/chrono-0.2.25/src/format/mod.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/chrono-0.2.25/src/format/mod.rs 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,481 @@ +// This is a part of rust-chrono. +// Copyright (c) 2014-2015, Kang Seonghoon. +// See README.md and LICENSE.txt for details. + +//! Formatting utilities for date and time. + +use std::fmt; +use std::error::Error; + +use {Datelike, Timelike}; +use div::{div_floor, mod_floor}; +use duration::Duration; +use offset::Offset; +use naive::date::NaiveDate; +use naive::time::NaiveTime; + +pub use self::strftime::StrftimeItems; +pub use self::parsed::Parsed; +pub use self::parse::parse; + +/// Padding characters for numeric items. +#[derive(Copy, Clone, PartialEq, Eq, Debug)] +pub enum Pad { + /// No padding. + None, + /// Zero (`0`) padding. 
+ Zero, + /// Space padding. + Space, +} + +/// Numeric item types. +/// They have associated formatting width (FW) and parsing width (PW). +/// +/// The **formatting width** is the minimal width to be formatted. +/// If the number is too short, and the padding is not [`Pad::None`](./enum.Pad.html#variant.None), +/// then it is left-padded. +/// If the number is too long or (in some cases) negative, it is printed as is. +/// +/// The **parsing width** is the maximal width to be scanned. +/// The parser only tries to consume from one to given number of digits (greedily). +/// It also trims the preceding whitespaces if any. +/// It cannot parse the negative number, so some date and time cannot be formatted then +/// parsed with the same formatting items. +#[derive(Copy, Clone, PartialEq, Eq, Debug)] +pub enum Numeric { + /// Full Gregorian year (FW=4, PW=∞). + /// May accept years before 1 BCE or after 9999 CE, given an initial sign. + Year, + /// Gregorian year divided by 100 (century number; FW=PW=2). Implies the non-negative year. + YearDiv100, + /// Gregorian year modulo 100 (FW=PW=2). Cannot be negative. + YearMod100, + /// Year in the ISO week date (FW=4, PW=∞). + /// May accept years before 1 BCE or after 9999 CE, given an initial sign. + IsoYear, + /// Year in the ISO week date, divided by 100 (FW=PW=2). Implies the non-negative year. + IsoYearDiv100, + /// Year in the ISO week date, modulo 100 (FW=PW=2). Cannot be negative. + IsoYearMod100, + /// Month (FW=PW=2). + Month, + /// Day of the month (FW=PW=2). + Day, + /// Week number, where the week 1 starts at the first Sunday of January (FW=PW=2). + WeekFromSun, + /// Week number, where the week 1 starts at the first Monday of January (FW=PW=2). + WeekFromMon, + /// Week number in the ISO week date (FW=PW=2). + IsoWeek, + /// Day of the week, where Sunday = 0 and Saturday = 6 (FW=PW=1). + NumDaysFromSun, + /// Day of the week, where Monday = 1 and Sunday = 7 (FW=PW=1). 
+ WeekdayFromMon, + /// Day of the year (FW=PW=3). + Ordinal, + /// Hour number in the 24-hour clocks (FW=PW=2). + Hour, + /// Hour number in the 12-hour clocks (FW=PW=2). + Hour12, + /// The number of minutes since the last whole hour (FW=PW=2). + Minute, + /// The number of seconds since the last whole minute (FW=PW=2). + Second, + /// The number of nanoseconds since the last whole second (FW=PW=9). + /// Note that this is *not* left-aligned; + /// see also [`Fixed::Nanosecond`](./enum.Fixed.html#variant.Nanosecond). + Nanosecond, + /// The number of non-leap seconds since the midnight UTC on January 1, 1970 (FW=1, PW=∞). + /// For formatting, it assumes UTC upon the absence of time zone offset. + Timestamp, +} + +/// Fixed-format item types. +/// +/// They have their own rules of formatting and parsing. +/// Otherwise noted, they print in the specified cases but parse case-insensitively. +#[derive(Copy, Clone, PartialEq, Eq, Debug)] +pub enum Fixed { + /// Abbreviated month names. + /// + /// Prints a three-letter-long name in the title case, reads the same name in any case. + ShortMonthName, + /// Full month names. + /// + /// Prints a full name in the title case, reads either a short or full name in any case. + LongMonthName, + /// Abbreviated day of the week names. + /// + /// Prints a three-letter-long name in the title case, reads the same name in any case. + ShortWeekdayName, + /// Full day of the week names. + /// + /// Prints a full name in the title case, reads either a short or full name in any case. + LongWeekdayName, + /// AM/PM. + /// + /// Prints in lower case, reads in any case. + LowerAmPm, + /// AM/PM. + /// + /// Prints in upper case, reads in any case. + UpperAmPm, + /// An optional dot plus one or more digits for left-aligned nanoseconds. + /// May print nothing, 3, 6 or 9 digits according to the available accuracy. + /// See also [`Numeric::Nanosecond`](./enum.Numeric.html#variant.Nanosecond). 
+ Nanosecond, + /// Same to [`Nanosecond`](#variant.Nanosecond) but the accuracy is fixed to 3. + Nanosecond3, + /// Same to [`Nanosecond`](#variant.Nanosecond) but the accuracy is fixed to 6. + Nanosecond6, + /// Same to [`Nanosecond`](#variant.Nanosecond) but the accuracy is fixed to 9. + Nanosecond9, + /// Timezone name. + /// + /// It does not support parsing, its use in the parser is an immediate failure. + TimezoneName, + /// Offset from the local time to UTC (`+09:00` or `-04:00` or `+00:00`). + /// + /// In the parser, the colon can be omitted and/or surrounded with any amount of whitespaces. + /// The offset is limited from `-24:00` to `+24:00`, + /// which is same to [`FixedOffset`](../offset/fixed/struct.FixedOffset.html)'s range. + TimezoneOffsetColon, + /// Offset from the local time to UTC (`+09:00` or `-04:00` or `Z`). + /// + /// In the parser, the colon can be omitted and/or surrounded with any amount of whitespaces, + /// and `Z` can be either in upper case or in lower case. + /// The offset is limited from `-24:00` to `+24:00`, + /// which is same to [`FixedOffset`](../offset/fixed/struct.FixedOffset.html)'s range. + TimezoneOffsetColonZ, + /// Same to [`TimezoneOffsetColon`](#variant.TimezoneOffsetColon) but prints no colon. + /// Parsing allows an optional colon. + TimezoneOffset, + /// Same to [`TimezoneOffsetColonZ`](#variant.TimezoneOffsetColonZ) but prints no colon. + /// Parsing allows an optional colon. + TimezoneOffsetZ, + /// RFC 2822 date and time syntax. Commonly used for email and MIME date and time. + RFC2822, + /// RFC 3339 & ISO 8601 date and time syntax. + RFC3339, +} + +/// A single formatting item. This is used for both formatting and parsing. +#[derive(Copy, Clone, PartialEq, Eq, Debug)] +pub enum Item<'a> { + /// A literally printed and parsed text. + Literal(&'a str), + /// Whitespace. Prints literally but reads zero or more whitespace. + Space(&'a str), + /// Numeric item. 
Can be optionally padded to the maximal length (if any) when formatting; + /// the parser simply ignores any padded whitespace and zeroes. + Numeric(Numeric, Pad), + /// Fixed-format item. + Fixed(Fixed), + /// Issues a formatting error. Used to signal an invalid format string. + Error, +} + +macro_rules! lit { ($x:expr) => (Item::Literal($x)) } +macro_rules! sp { ($x:expr) => (Item::Space($x)) } +macro_rules! num { ($x:ident) => (Item::Numeric(Numeric::$x, Pad::None)) } +macro_rules! num0 { ($x:ident) => (Item::Numeric(Numeric::$x, Pad::Zero)) } +macro_rules! nums { ($x:ident) => (Item::Numeric(Numeric::$x, Pad::Space)) } +macro_rules! fix { ($x:ident) => (Item::Fixed(Fixed::$x)) } + +/// An error from the `parse` function. +#[derive(Debug, Clone, PartialEq, Copy)] +pub struct ParseError(ParseErrorKind); + +#[derive(Debug, Clone, PartialEq, Copy)] +enum ParseErrorKind { + /// Given field is out of permitted range. + OutOfRange, + + /// There is no possible date and time value with given set of fields. + /// + /// This does not include the out-of-range conditions, which are trivially invalid. + /// It includes the case that there are one or more fields that are inconsistent to each other. + Impossible, + + /// Given set of fields is not enough to make a requested date and time value. + /// + /// Note that there *may* be a case that given fields constrain the possible values so much + /// that there is a unique possible value. Chrono only tries to be correct for + /// most useful sets of fields however, as such constraint solving can be expensive. + NotEnough, + + /// The input string has some invalid character sequence for given formatting items. + Invalid, + + /// The input string has been prematurely ended. + TooShort, + + /// All formatting items have been read but there is a remaining input. + TooLong, + + /// There was an error on the formatting string, or there were non-supported formating items. + BadFormat, +} + +/// Same to `Result`. 
+pub type ParseResult = Result; + +impl fmt::Display for ParseError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + self.description().fmt(f) + } +} + +impl Error for ParseError { + fn description(&self) -> &str { + match self.0 { + ParseErrorKind::OutOfRange => "input is out of range", + ParseErrorKind::Impossible => "no possible date and time matching input", + ParseErrorKind::NotEnough => "input is not enough for unique date and time", + ParseErrorKind::Invalid => "input contains invalid characters", + ParseErrorKind::TooShort => "premature end of input", + ParseErrorKind::TooLong => "trailing input", + ParseErrorKind::BadFormat => "bad or unsupported format string", + } + } +} + +// to be used in this module and submodules +const OUT_OF_RANGE: ParseError = ParseError(ParseErrorKind::OutOfRange); +const IMPOSSIBLE: ParseError = ParseError(ParseErrorKind::Impossible); +const NOT_ENOUGH: ParseError = ParseError(ParseErrorKind::NotEnough); +const INVALID: ParseError = ParseError(ParseErrorKind::Invalid); +const TOO_SHORT: ParseError = ParseError(ParseErrorKind::TooShort); +const TOO_LONG: ParseError = ParseError(ParseErrorKind::TooLong); +const BAD_FORMAT: ParseError = ParseError(ParseErrorKind::BadFormat); + +/// Tries to format given arguments with given formatting items. +/// Internally used by `DelayedFormat`. 
+pub fn format<'a, I>(w: &mut fmt::Formatter, date: Option<&NaiveDate>, time: Option<&NaiveTime>, + off: Option<&(String, Duration)>, items: I) -> fmt::Result + where I: Iterator> { + // full and abbreviated month and weekday names + static SHORT_MONTHS: [&'static str; 12] = + ["Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"]; + static LONG_MONTHS: [&'static str; 12] = + ["January", "February", "March", "April", "May", "June", + "July", "August", "September", "October", "November", "December"]; + static SHORT_WEEKDAYS: [&'static str; 7] = + ["Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"]; + static LONG_WEEKDAYS: [&'static str; 7] = + ["Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday", "Sunday"]; + + for item in items { + match item { + Item::Literal(s) | Item::Space(s) => try!(write!(w, "{}", s)), + + Item::Numeric(spec, pad) => { + use self::Numeric::*; + + let week_from_sun = |d: &NaiveDate| + (d.ordinal() as i32 - d.weekday().num_days_from_sunday() as i32 + 7) / 7; + let week_from_mon = |d: &NaiveDate| + (d.ordinal() as i32 - d.weekday().num_days_from_monday() as i32 + 7) / 7; + + let (width, v) = match spec { + Year => (4, date.map(|d| d.year() as i64)), + YearDiv100 => (2, date.map(|d| div_floor(d.year() as i64, 100))), + YearMod100 => (2, date.map(|d| mod_floor(d.year() as i64, 100))), + IsoYear => (4, date.map(|d| d.isoweekdate().0 as i64)), + IsoYearDiv100 => (2, date.map(|d| div_floor(d.isoweekdate().0 as i64, 100))), + IsoYearMod100 => (2, date.map(|d| mod_floor(d.isoweekdate().0 as i64, 100))), + Month => (2, date.map(|d| d.month() as i64)), + Day => (2, date.map(|d| d.day() as i64)), + WeekFromSun => (2, date.map(|d| week_from_sun(d) as i64)), + WeekFromMon => (2, date.map(|d| week_from_mon(d) as i64)), + IsoWeek => (2, date.map(|d| d.isoweekdate().1 as i64)), + NumDaysFromSun => (1, date.map(|d| d.weekday().num_days_from_sunday() as i64)), + WeekdayFromMon => (1, date.map(|d| 
d.weekday().number_from_monday() as i64)), + Ordinal => (3, date.map(|d| d.ordinal() as i64)), + Hour => (2, time.map(|t| t.hour() as i64)), + Hour12 => (2, time.map(|t| t.hour12().1 as i64)), + Minute => (2, time.map(|t| t.minute() as i64)), + Second => (2, time.map(|t| (t.second() + + t.nanosecond() / 1_000_000_000) as i64)), + Nanosecond => (9, time.map(|t| (t.nanosecond() % 1_000_000_000) as i64)), + Timestamp => (1, match (date, time, off) { + (Some(d), Some(t), None) => + Some(d.and_time(*t).timestamp()), + (Some(d), Some(t), Some(&(_, off))) => + Some((d.and_time(*t) - off).timestamp()), + (_, _, _) => None + }), + }; + + if let Some(v) = v { + if (spec == Year || spec == IsoYear) && !(0 <= v && v < 10000) { + // non-four-digit years require an explicit sign as per ISO 8601 + match pad { + Pad::None => try!(write!(w, "{:+}", v)), + Pad::Zero => try!(write!(w, "{:+01$}", v, width + 1)), + Pad::Space => try!(write!(w, "{:+1$}", v, width + 1)), + } + } else { + match pad { + Pad::None => try!(write!(w, "{}", v)), + Pad::Zero => try!(write!(w, "{:01$}", v, width)), + Pad::Space => try!(write!(w, "{:1$}", v, width)), + } + } + } else { + return Err(fmt::Error); // insufficient arguments for given format + } + }, + + Item::Fixed(spec) => { + use self::Fixed::*; + + /// Prints an offset from UTC in the format of `+HHMM` or `+HH:MM`. + /// `Z` instead of `+00[:]00` is allowed when `allow_zulu` is true. 
+ fn write_local_minus_utc(w: &mut fmt::Formatter, off: Duration, + allow_zulu: bool, use_colon: bool) -> fmt::Result { + let off = off.num_minutes(); + if !allow_zulu || off != 0 { + let (sign, off) = if off < 0 {('-', -off)} else {('+', off)}; + if use_colon { + write!(w, "{}{:02}:{:02}", sign, off / 60, off % 60) + } else { + write!(w, "{}{:02}{:02}", sign, off / 60, off % 60) + } + } else { + write!(w, "Z") + } + } + + let ret = match spec { + ShortMonthName => + date.map(|d| write!(w, "{}", SHORT_MONTHS[d.month0() as usize])), + LongMonthName => + date.map(|d| write!(w, "{}", LONG_MONTHS[d.month0() as usize])), + ShortWeekdayName => + date.map(|d| write!(w, "{}", + SHORT_WEEKDAYS[d.weekday().num_days_from_monday() as usize])), + LongWeekdayName => + date.map(|d| write!(w, "{}", + LONG_WEEKDAYS[d.weekday().num_days_from_monday() as usize])), + LowerAmPm => + time.map(|t| write!(w, "{}", if t.hour12().0 {"pm"} else {"am"})), + UpperAmPm => + time.map(|t| write!(w, "{}", if t.hour12().0 {"PM"} else {"AM"})), + Nanosecond => + time.map(|t| { + let nano = t.nanosecond() % 1_000_000_000; + if nano == 0 { + Ok(()) + } else if nano % 1_000_000 == 0 { + write!(w, ".{:03}", nano / 1_000_000) + } else if nano % 1_000 == 0 { + write!(w, ".{:06}", nano / 1_000) + } else { + write!(w, ".{:09}", nano) + } + }), + Nanosecond3 => + time.map(|t| { + let nano = t.nanosecond() % 1_000_000_000; + write!(w, ".{:03}", nano / 1_000_000) + }), + Nanosecond6 => + time.map(|t| { + let nano = t.nanosecond() % 1_000_000_000; + write!(w, ".{:06}", nano / 1_000) + }), + Nanosecond9 => + time.map(|t| { + let nano = t.nanosecond() % 1_000_000_000; + write!(w, ".{:09}", nano) + }), + TimezoneName => + off.map(|&(ref name, _)| write!(w, "{}", *name)), + TimezoneOffsetColon => + off.map(|&(_, off)| write_local_minus_utc(w, off, false, true)), + TimezoneOffsetColonZ => + off.map(|&(_, off)| write_local_minus_utc(w, off, true, true)), + TimezoneOffset => + off.map(|&(_, off)| 
write_local_minus_utc(w, off, false, false)), + TimezoneOffsetZ => + off.map(|&(_, off)| write_local_minus_utc(w, off, true, false)), + RFC2822 => // same to `%a, %e %b %Y %H:%M:%S %z` + if let (Some(d), Some(t), Some(&(_, off))) = (date, time, off) { + let sec = t.second() + t.nanosecond() / 1_000_000_000; + try!(write!(w, "{}, {:2} {} {:04} {:02}:{:02}:{:02} ", + SHORT_WEEKDAYS[d.weekday().num_days_from_monday() as usize], + d.day(), SHORT_MONTHS[d.month0() as usize], d.year(), + t.hour(), t.minute(), sec)); + Some(write_local_minus_utc(w, off, false, false)) + } else { + None + }, + RFC3339 => // same to `%Y-%m-%dT%H:%M:%S%.f%:z` + if let (Some(d), Some(t), Some(&(_, off))) = (date, time, off) { + // reuse `Debug` impls which already print ISO 8601 format. + // this is faster in this way. + try!(write!(w, "{:?}T{:?}", d, t)); + Some(write_local_minus_utc(w, off, false, true)) + } else { + None + }, + }; + + match ret { + Some(ret) => try!(ret), + None => return Err(fmt::Error), // insufficient arguments for given format + } + }, + + Item::Error => return Err(fmt::Error), + } + } + + Ok(()) +} + +pub mod parsed; + +// due to the size of parsing routines, they are in separate modules. +mod scan; +mod parse; + +pub mod strftime; + +/// A *temporary* object which can be used as an argument to `format!` or others. +/// This is normally constructed via `format` methods of each date and time type. +#[derive(Debug)] +pub struct DelayedFormat { + /// The date view, if any. + date: Option, + /// The time view, if any. + time: Option, + /// The name and local-to-UTC difference for the offset (timezone), if any. + off: Option<(String, Duration)>, + /// An iterator returning formatting items. + items: I, +} + +impl<'a, I: Iterator> + Clone> DelayedFormat { + /// Makes a new `DelayedFormat` value out of local date and time. 
+ pub fn new(date: Option, time: Option, items: I) -> DelayedFormat { + DelayedFormat { date: date, time: time, off: None, items: items } + } + + /// Makes a new `DelayedFormat` value out of local date and time and UTC offset. + pub fn new_with_offset(date: Option, time: Option, + offset: &Off, items: I) -> DelayedFormat + where Off: Offset + fmt::Display { + let name_and_diff = (offset.to_string(), offset.local_minus_utc()); + DelayedFormat { date: date, time: time, off: Some(name_and_diff), items: items } + } +} + +impl<'a, I: Iterator> + Clone> fmt::Display for DelayedFormat { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + format(f, self.date.as_ref(), self.time.as_ref(), self.off.as_ref(), self.items.clone()) + } +} + diff -Nru cargo-0.17.0/vendor/chrono-0.2.25/src/format/parsed.rs cargo-0.19.0/vendor/chrono-0.2.25/src/format/parsed.rs --- cargo-0.17.0/vendor/chrono-0.2.25/src/format/parsed.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/chrono-0.2.25/src/format/parsed.rs 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,1085 @@ +// This is a part of rust-chrono. +// Copyright (c) 2015, Kang Seonghoon. +// See README.md and LICENSE.txt for details. + +//! A collection of parsed date and time items. +//! They can be constructed incrementally while being checked for consistency. + +use num::traits::ToPrimitive; + +use {Datelike, Timelike}; +use Weekday; +use div::div_rem; +use duration::Duration; +use offset::{TimeZone, Offset, LocalResult}; +use offset::fixed::FixedOffset; +use naive::date::NaiveDate; +use naive::time::NaiveTime; +use naive::datetime::NaiveDateTime; +use datetime::DateTime; +use super::{ParseResult, OUT_OF_RANGE, IMPOSSIBLE, NOT_ENOUGH}; + +/// Parsed parts of date and time. There are two classes of methods: +/// +/// - `set_*` methods try to set given field(s) while checking for the consistency. +/// It may or may not check for the range constraint immediately (for efficiency reasons). 
+/// +/// - `to_*` methods try to make a concrete date and time value out of set fields. +/// It fully checks any remaining out-of-range conditions and inconsistent/impossible fields. +#[allow(missing_copy_implementations)] +#[derive(Clone, PartialEq, Debug)] +pub struct Parsed { + /// Year. + /// + /// This can be negative unlike [`year_div_100`](#structfield.year_div_100) + /// and [`year_mod_100`](#structfield.year_mod_100) fields. + pub year: Option, + + /// Year divided by 100. Implies that the year is >= 1 BCE when set. + /// + /// Due to the common usage, if this field is missing but + /// [`year_mod_100`](#structfield.year_mod_100) is present, + /// it is inferred to 19 when `year_mod_100 >= 70` and 20 otherwise. + pub year_div_100: Option, + + /// Year modulo 100. Implies that the year is >= 1 BCE when set. + pub year_mod_100: Option, + + /// Year in the [ISO week date](../../naive/date/index.html#week-date). + /// + /// This can be negative unlike [`isoyear_div_100`](#structfield.isoyear_div_100) and + /// [`isoyear_mod_100`](#structfield.isoyear_mod_100) fields. + pub isoyear: Option, + + /// Year in the [ISO week date](../../naive/date/index.html#week-date), divided by 100. + /// Implies that the year is >= 1 BCE when set. + /// + /// Due to the common usage, if this field is missing but + /// [`isoyear_mod_100`](#structfield.isoyear_mod_100) is present, + /// it is inferred to 19 when `isoyear_mod_100 >= 70` and 20 otherwise. + pub isoyear_div_100: Option, + + /// Year in the [ISO week date](../../naive/date/index.html#week-date), modulo 100. + /// Implies that the year is >= 1 BCE when set. + pub isoyear_mod_100: Option, + + /// Month (1--12). + pub month: Option, + + /// Week number, where the week 1 starts at the first Sunday of January + /// (0--53, 1--53 or 1--52 depending on the year). + pub week_from_sun: Option, + + /// Week number, where the week 1 starts at the first Monday of January + /// (0--53, 1--53 or 1--52 depending on the year). 
+ pub week_from_mon: Option, + + /// [ISO week number](../../naive/date/index.html#week-date) + /// (1--52 or 1--53 depending on the year). + pub isoweek: Option, + + /// Day of the week. + pub weekday: Option, + + /// Day of the year (1--365 or 1--366 depending on the year). + pub ordinal: Option, + + /// Day of the month (1--28, 1--29, 1--30 or 1--31 depending on the month). + pub day: Option, + + /// Hour number divided by 12 (0--1). 0 indicates AM and 1 indicates PM. + pub hour_div_12: Option, + + /// Hour number modulo 12 (0--11). + pub hour_mod_12: Option, + + /// Minute number (0--59). + pub minute: Option, + + /// Second number (0--60, accounting for leap seconds). + pub second: Option, + + /// The number of nanoseconds since the whole second (0--999,999,999). + pub nanosecond: Option, + + /// The number of non-leap seconds since the midnight UTC on January 1, 1970. + /// + /// This can be off by one if [`second`](#structfield.second) is 60 (a leap second). + pub timestamp: Option, + + /// Offset from the local time to UTC, in seconds. + pub offset: Option, +} + +/// Checks if `old` is either empty or has the same value to `new` (i.e. "consistent"), +/// and if it is empty, set `old` to `new` as well. +fn set_if_consistent(old: &mut Option, new: T) -> ParseResult<()> { + if let Some(ref old) = *old { + if *old == new {Ok(())} else {Err(IMPOSSIBLE)} + } else { + *old = Some(new); + Ok(()) + } +} + +impl Parsed { + /// Returns the initial value of parsed parts. + pub fn new() -> Parsed { + Parsed { year: None, year_div_100: None, year_mod_100: None, isoyear: None, + isoyear_div_100: None, isoyear_mod_100: None, month: None, + week_from_sun: None, week_from_mon: None, isoweek: None, weekday: None, + ordinal: None, day: None, hour_div_12: None, hour_mod_12: None, minute: None, + second: None, nanosecond: None, timestamp: None, offset: None } + } + + /// Tries to set the [`year`](#structfield.year) field from given value. 
+ pub fn set_year(&mut self, value: i64) -> ParseResult<()> { + set_if_consistent(&mut self.year, try!(value.to_i32().ok_or(OUT_OF_RANGE))) + } + + /// Tries to set the [`year_div_100`](#structfield.year_div_100) field from given value. + pub fn set_year_div_100(&mut self, value: i64) -> ParseResult<()> { + if value < 0 { return Err(OUT_OF_RANGE); } + set_if_consistent(&mut self.year_div_100, try!(value.to_i32().ok_or(OUT_OF_RANGE))) + } + + /// Tries to set the [`year_mod_100`](#structfield.year_mod_100) field from given value. + pub fn set_year_mod_100(&mut self, value: i64) -> ParseResult<()> { + if value < 0 { return Err(OUT_OF_RANGE); } + set_if_consistent(&mut self.year_mod_100, try!(value.to_i32().ok_or(OUT_OF_RANGE))) + } + + /// Tries to set the [`isoyear`](#structfield.isoyear) field from given value. + pub fn set_isoyear(&mut self, value: i64) -> ParseResult<()> { + set_if_consistent(&mut self.isoyear, try!(value.to_i32().ok_or(OUT_OF_RANGE))) + } + + /// Tries to set the [`isoyear_div_100`](#structfield.isoyear_div_100) field from given value. + pub fn set_isoyear_div_100(&mut self, value: i64) -> ParseResult<()> { + if value < 0 { return Err(OUT_OF_RANGE); } + set_if_consistent(&mut self.isoyear_div_100, try!(value.to_i32().ok_or(OUT_OF_RANGE))) + } + + /// Tries to set the [`isoyear_mod_100`](#structfield.isoyear_mod_100) field from given value. + pub fn set_isoyear_mod_100(&mut self, value: i64) -> ParseResult<()> { + if value < 0 { return Err(OUT_OF_RANGE); } + set_if_consistent(&mut self.isoyear_mod_100, try!(value.to_i32().ok_or(OUT_OF_RANGE))) + } + + /// Tries to set the [`month`](#structfield.month) field from given value. + pub fn set_month(&mut self, value: i64) -> ParseResult<()> { + set_if_consistent(&mut self.month, try!(value.to_u32().ok_or(OUT_OF_RANGE))) + } + + /// Tries to set the [`week_from_sun`](#structfield.week_from_sun) field from given value. 
+ pub fn set_week_from_sun(&mut self, value: i64) -> ParseResult<()> { + set_if_consistent(&mut self.week_from_sun, try!(value.to_u32().ok_or(OUT_OF_RANGE))) + } + + /// Tries to set the [`week_from_mon`](#structfield.week_from_mon) field from given value. + pub fn set_week_from_mon(&mut self, value: i64) -> ParseResult<()> { + set_if_consistent(&mut self.week_from_mon, try!(value.to_u32().ok_or(OUT_OF_RANGE))) + } + + /// Tries to set the [`isoweek`](#structfield.isoweek) field from given value. + pub fn set_isoweek(&mut self, value: i64) -> ParseResult<()> { + set_if_consistent(&mut self.isoweek, try!(value.to_u32().ok_or(OUT_OF_RANGE))) + } + + /// Tries to set the [`weekday`](#structfield.weekday) field from given value. + pub fn set_weekday(&mut self, value: Weekday) -> ParseResult<()> { + set_if_consistent(&mut self.weekday, value) + } + + /// Tries to set the [`ordinal`](#structfield.ordinal) field from given value. + pub fn set_ordinal(&mut self, value: i64) -> ParseResult<()> { + set_if_consistent(&mut self.ordinal, try!(value.to_u32().ok_or(OUT_OF_RANGE))) + } + + /// Tries to set the [`day`](#structfield.day) field from given value. + pub fn set_day(&mut self, value: i64) -> ParseResult<()> { + set_if_consistent(&mut self.day, try!(value.to_u32().ok_or(OUT_OF_RANGE))) + } + + /// Tries to set the [`hour_div_12`](#structfield.hour_div_12) field from given value. + /// (`false` for AM, `true` for PM) + pub fn set_ampm(&mut self, value: bool) -> ParseResult<()> { + set_if_consistent(&mut self.hour_div_12, if value {1} else {0}) + } + + /// Tries to set the [`hour_mod_12`](#structfield.hour_mod_12) field from + /// given hour number in 12-hour clocks. 
+ pub fn set_hour12(&mut self, value: i64) -> ParseResult<()> { + if value < 1 || value > 12 { return Err(OUT_OF_RANGE); } + set_if_consistent(&mut self.hour_mod_12, value as u32 % 12) + } + + /// Tries to set both [`hour_div_12`](#structfield.hour_div_12) and + /// [`hour_mod_12`](#structfield.hour_mod_12) fields from given value. + pub fn set_hour(&mut self, value: i64) -> ParseResult<()> { + let v = try!(value.to_u32().ok_or(OUT_OF_RANGE)); + try!(set_if_consistent(&mut self.hour_div_12, v / 12)); + try!(set_if_consistent(&mut self.hour_mod_12, v % 12)); + Ok(()) + } + + /// Tries to set the [`minute`](#structfield.minute) field from given value. + pub fn set_minute(&mut self, value: i64) -> ParseResult<()> { + set_if_consistent(&mut self.minute, try!(value.to_u32().ok_or(OUT_OF_RANGE))) + } + + /// Tries to set the [`second`](#structfield.second) field from given value. + pub fn set_second(&mut self, value: i64) -> ParseResult<()> { + set_if_consistent(&mut self.second, try!(value.to_u32().ok_or(OUT_OF_RANGE))) + } + + /// Tries to set the [`nanosecond`](#structfield.nanosecond) field from given value. + pub fn set_nanosecond(&mut self, value: i64) -> ParseResult<()> { + set_if_consistent(&mut self.nanosecond, try!(value.to_u32().ok_or(OUT_OF_RANGE))) + } + + /// Tries to set the [`timestamp`](#structfield.timestamp) field from given value. + pub fn set_timestamp(&mut self, value: i64) -> ParseResult<()> { + set_if_consistent(&mut self.timestamp, value) + } + + /// Tries to set the [`offset`](#structfield.offset) field from given value. + pub fn set_offset(&mut self, value: i64) -> ParseResult<()> { + set_if_consistent(&mut self.offset, try!(value.to_i32().ok_or(OUT_OF_RANGE))) + } + + /// Returns a parsed naive date out of given fields. + /// + /// This method is able to determine the date from given subset of fields: + /// + /// - Year, month, day. + /// - Year, day of the year (ordinal). 
+ /// - Year, week number counted from Sunday or Monday, day of the week. + /// - ISO week date. + /// + /// Gregorian year and ISO week date year can have their century number (`*_div_100`) omitted, + /// the two-digit year is used to guess the century number then. + pub fn to_naive_date(&self) -> ParseResult { + fn resolve_year(y: Option, q: Option, + r: Option) -> ParseResult> { + match (y, q, r) { + // if there is no further information, simply return the given full year. + // this is a common case, so let's avoid division here. + (y, None, None) => Ok(y), + + // if there is a full year *and* also quotient and/or modulo, + // check if present quotient and/or modulo is consistent to the full year. + // since the presence of those fields means a positive full year, + // we should filter a negative full year first. + (Some(y), q, r @ Some(0...99)) | (Some(y), q, r @ None) => { + if y < 0 { return Err(OUT_OF_RANGE); } + let (q_, r_) = div_rem(y, 100); + if q.unwrap_or(q_) == q_ && r.unwrap_or(r_) == r_ { + Ok(Some(y)) + } else { + Err(IMPOSSIBLE) + } + }, + + // the full year is missing but we have quotient and modulo. + // reconstruct the full year. make sure that the result is always positive. + (None, Some(q), Some(r @ 0...99)) => { + if q < 0 { return Err(OUT_OF_RANGE); } + let y = q.checked_mul(100).and_then(|v| v.checked_add(r)); + Ok(Some(try!(y.ok_or(OUT_OF_RANGE)))) + }, + + // we only have modulo. try to interpret a modulo as a conventional two-digit year. + // note: we are affected by Rust issue #18060. avoid multiple range patterns. + (None, None, Some(r @ 0...99)) => Ok(Some(r + if r < 70 {2000} else {1900})), + + // otherwise it is an out-of-bound or insufficient condition. 
+ (None, Some(_), None) => Err(NOT_ENOUGH), + (_, _, Some(_)) => Err(OUT_OF_RANGE), + } + } + + let given_year = + try!(resolve_year(self.year, self.year_div_100, self.year_mod_100)); + let given_isoyear = + try!(resolve_year(self.isoyear, self.isoyear_div_100, self.isoyear_mod_100)); + + // verify the normal year-month-day date. + let verify_ymd = |date: NaiveDate| { + let year = date.year(); + let (year_div_100, year_mod_100) = if year >= 0 { + let (q, r) = div_rem(year, 100); + (Some(q), Some(r)) + } else { + (None, None) // they should be empty to be consistent + }; + let month = date.month(); + let day = date.day(); + (self.year.unwrap_or(year) == year && + self.year_div_100.or(year_div_100) == year_div_100 && + self.year_mod_100.or(year_mod_100) == year_mod_100 && + self.month.unwrap_or(month) == month && + self.day.unwrap_or(day) == day) + }; + + // verify the ISO week date. + let verify_isoweekdate = |date: NaiveDate| { + let (isoyear, isoweek, weekday) = date.isoweekdate(); + let (isoyear_div_100, isoyear_mod_100) = if isoyear >= 0 { + let (q, r) = div_rem(isoyear, 100); + (Some(q), Some(r)) + } else { + (None, None) // they should be empty to be consistent + }; + (self.isoyear.unwrap_or(isoyear) == isoyear && + self.isoyear_div_100.or(isoyear_div_100) == isoyear_div_100 && + self.isoyear_mod_100.or(isoyear_mod_100) == isoyear_mod_100 && + self.isoweek.unwrap_or(isoweek) == isoweek && + self.weekday.unwrap_or(weekday) == weekday) + }; + + // verify the ordinal and other (non-ISO) week dates. 
+ let verify_ordinal = |date: NaiveDate| { + let ordinal = date.ordinal(); + let weekday = date.weekday(); + let week_from_sun = (ordinal as i32 - weekday.num_days_from_sunday() as i32 + 7) / 7; + let week_from_mon = (ordinal as i32 - weekday.num_days_from_monday() as i32 + 7) / 7; + (self.ordinal.unwrap_or(ordinal) == ordinal && + self.week_from_sun.map_or(week_from_sun, |v| v as i32) == week_from_sun && + self.week_from_mon.map_or(week_from_mon, |v| v as i32) == week_from_mon) + }; + + // test several possibilities. + // tries to construct a full `NaiveDate` as much as possible, then verifies that + // it is consistent with other given fields. + let (verified, parsed_date) = match (given_year, given_isoyear, self) { + (Some(year), _, &Parsed { month: Some(month), day: Some(day), .. }) => { + // year, month, day + let date = try!(NaiveDate::from_ymd_opt(year, month, day).ok_or(OUT_OF_RANGE)); + (verify_isoweekdate(date) && verify_ordinal(date), date) + }, + + (Some(year), _, &Parsed { ordinal: Some(ordinal), .. }) => { + // year, day of the year + let date = try!(NaiveDate::from_yo_opt(year, ordinal).ok_or(OUT_OF_RANGE)); + (verify_ymd(date) && verify_isoweekdate(date) && verify_ordinal(date), date) + }, + + (Some(year), _, &Parsed { week_from_sun: Some(week_from_sun), + weekday: Some(weekday), .. }) => { + // year, week (starting at 1st Sunday), day of the week + let newyear = try!(NaiveDate::from_yo_opt(year, 1).ok_or(OUT_OF_RANGE)); + let firstweek = match newyear.weekday() { + Weekday::Sun => 0, + Weekday::Mon => 6, + Weekday::Tue => 5, + Weekday::Wed => 4, + Weekday::Thu => 3, + Weekday::Fri => 2, + Weekday::Sat => 1, + }; + + // `firstweek+1`-th day of January is the beginning of the week 1. + if week_from_sun > 53 { return Err(OUT_OF_RANGE); } // can it overflow? 
+ let ndays = firstweek + (week_from_sun as i32 - 1) * 7 + + weekday.num_days_from_sunday() as i32; + let date = try!(newyear.checked_add(Duration::days(ndays as i64)) + .ok_or(OUT_OF_RANGE)); + if date.year() != year { return Err(OUT_OF_RANGE); } // early exit for correct error + + (verify_ymd(date) && verify_isoweekdate(date) && verify_ordinal(date), date) + }, + + (Some(year), _, &Parsed { week_from_mon: Some(week_from_mon), + weekday: Some(weekday), .. }) => { + // year, week (starting at 1st Monday), day of the week + let newyear = try!(NaiveDate::from_yo_opt(year, 1).ok_or(OUT_OF_RANGE)); + let firstweek = match newyear.weekday() { + Weekday::Sun => 1, + Weekday::Mon => 0, + Weekday::Tue => 6, + Weekday::Wed => 5, + Weekday::Thu => 4, + Weekday::Fri => 3, + Weekday::Sat => 2, + }; + + // `firstweek+1`-th day of January is the beginning of the week 1. + if week_from_mon > 53 { return Err(OUT_OF_RANGE); } // can it overflow? + let ndays = firstweek + (week_from_mon as i32 - 1) * 7 + + weekday.num_days_from_monday() as i32; + let date = try!(newyear.checked_add(Duration::days(ndays as i64)) + .ok_or(OUT_OF_RANGE)); + if date.year() != year { return Err(OUT_OF_RANGE); } // early exit for correct error + + (verify_ymd(date) && verify_isoweekdate(date) && verify_ordinal(date), date) + }, + + (_, Some(isoyear), &Parsed { isoweek: Some(isoweek), weekday: Some(weekday), .. }) => { + // ISO year, week, day of the week + let date = NaiveDate::from_isoywd_opt(isoyear, isoweek, weekday); + let date = try!(date.ok_or(OUT_OF_RANGE)); + (verify_ymd(date) && verify_ordinal(date), date) + }, + + (_, _, _) => return Err(NOT_ENOUGH) + }; + + if verified { + Ok(parsed_date) + } else { + Err(IMPOSSIBLE) + } + } + + /// Returns a parsed naive time out of given fields. + /// + /// This method is able to determine the time from given subset of fields: + /// + /// - Hour, minute. (second and nanosecond assumed to be 0) + /// - Hour, minute, second. 
(nanosecond assumed to be 0) + /// - Hour, minute, second, nanosecond. + /// + /// It is able to handle leap seconds when given second is 60. + pub fn to_naive_time(&self) -> ParseResult { + let hour_div_12 = match self.hour_div_12 { + Some(v @ 0...1) => v, + Some(_) => return Err(OUT_OF_RANGE), + None => return Err(NOT_ENOUGH), + }; + let hour_mod_12 = match self.hour_mod_12 { + Some(v @ 0...11) => v, + Some(_) => return Err(OUT_OF_RANGE), + None => return Err(NOT_ENOUGH), + }; + let hour = hour_div_12 * 12 + hour_mod_12; + + let minute = match self.minute { + Some(v @ 0...59) => v, + Some(_) => return Err(OUT_OF_RANGE), + None => return Err(NOT_ENOUGH), + }; + + // we allow omitting seconds or nanoseconds, but they should be in the range. + let (second, mut nano) = match self.second.unwrap_or(0) { + v @ 0...59 => (v, 0), + 60 => (59, 1_000_000_000), + _ => return Err(OUT_OF_RANGE), + }; + nano += match self.nanosecond { + Some(v @ 0...999_999_999) if self.second.is_some() => v, + Some(0...999_999_999) => return Err(NOT_ENOUGH), // second is missing + Some(_) => return Err(OUT_OF_RANGE), + None => 0, + }; + + NaiveTime::from_hms_nano_opt(hour, minute, second, nano).ok_or(OUT_OF_RANGE) + } + + /// Returns a parsed naive date and time out of given fields, + /// except for the [`offset`](#structfield.offset) field (assumed to have a given value). + /// This is required for parsing a local time or other known-timezone inputs. + /// + /// This method is able to determine the combined date and time + /// from date and time fields or a single [`timestamp`](#structfield.timestamp) field. + /// Either way those fields have to be consistent to each other. 
+ pub fn to_naive_datetime_with_offset(&self, offset: i32) -> ParseResult { + let date = self.to_naive_date(); + let time = self.to_naive_time(); + if let (Ok(date), Ok(time)) = (date, time) { + let datetime = date.and_time(time); + + // verify the timestamp field if any + // the following is safe, `timestamp` is very limited in range + let timestamp = datetime.timestamp() - offset as i64; + if let Some(given_timestamp) = self.timestamp { + // if `datetime` represents a leap second, it might be off by one second. + if given_timestamp != timestamp && + !(datetime.nanosecond() >= 1_000_000_000 && given_timestamp == timestamp + 1) { + return Err(IMPOSSIBLE); + } + } + + Ok(datetime) + } else if let Some(timestamp) = self.timestamp { + use super::ParseError as PE; + use super::ParseErrorKind::{OutOfRange, Impossible}; + + // if date and time is problematic already, there is no point proceeding. + // we at least try to give a correct error though. + match (date, time) { + (Err(PE(OutOfRange)), _) | (_, Err(PE(OutOfRange))) => return Err(OUT_OF_RANGE), + (Err(PE(Impossible)), _) | (_, Err(PE(Impossible))) => return Err(IMPOSSIBLE), + (_, _) => {} // one of them is insufficient + } + + // reconstruct date and time fields from timestamp + let ts = try!(timestamp.checked_add(offset as i64).ok_or(OUT_OF_RANGE)); + let datetime = NaiveDateTime::from_timestamp_opt(ts, 0); + let mut datetime = try!(datetime.ok_or(OUT_OF_RANGE)); + + // fill year, ordinal, hour, minute and second fields from timestamp. + // if existing fields are consistent, this will allow the full date/time reconstruction. + let mut parsed = self.clone(); + if parsed.second == Some(60) { + // `datetime.second()` cannot be 60, so this is the only case for a leap second. + match datetime.second() { + // it's okay, just do not try to overwrite the existing field. + 59 => {} + // `datetime` is known to be off by one second. + 0 => { datetime = datetime - Duration::seconds(1); } + // otherwise it is impossible. 
+ _ => return Err(IMPOSSIBLE) + } + // ...and we have the correct candidates for other fields. + } else { + try!(parsed.set_second(datetime.second() as i64)); + } + try!(parsed.set_year (datetime.year() as i64)); + try!(parsed.set_ordinal(datetime.ordinal() as i64)); // more efficient than ymd + try!(parsed.set_hour (datetime.hour() as i64)); + try!(parsed.set_minute (datetime.minute() as i64)); + try!(parsed.set_nanosecond(0)); // no nanosecond precision in timestamp + + // validate other fields (e.g. week) and return + let date = try!(parsed.to_naive_date()); + let time = try!(parsed.to_naive_time()); + Ok(date.and_time(time)) + } else { + // reproduce the previous error(s) + try!(date); + try!(time); + unreachable!() + } + } + + /// Returns a parsed fixed time zone offset out of given fields. + pub fn to_fixed_offset(&self) -> ParseResult { + self.offset.and_then(|offset| FixedOffset::east_opt(offset)).ok_or(OUT_OF_RANGE) + } + + /// Returns a parsed timezone-aware date and time out of given fields. + /// + /// This method is able to determine the combined date and time + /// from date and time fields or a single [`timestamp`](#structfield.timestamp) field, + /// plus a time zone offset. + /// Either way those fields have to be consistent to each other. + pub fn to_datetime(&self) -> ParseResult> { + let offset = try!(self.offset.ok_or(NOT_ENOUGH)); + let datetime = try!(self.to_naive_datetime_with_offset(offset)); + let offset = try!(FixedOffset::east_opt(offset).ok_or(OUT_OF_RANGE)); + match offset.from_local_datetime(&datetime) { + LocalResult::None => Err(IMPOSSIBLE), + LocalResult::Single(t) => Ok(t), + LocalResult::Ambiguous(..) => Err(NOT_ENOUGH), + } + } + + /// Returns a parsed timezone-aware date and time out of given fields, + /// with an additional `TimeZone` used to interpret and validate the local date. 
+    ///
+    /// This method is able to determine the combined date and time
+    /// from date and time fields or a single [`timestamp`](#structfield.timestamp) field,
+    /// plus a time zone offset.
+    /// Either way those fields have to be consistent to each other.
+    /// If parsed fields include an UTC offset, it also has to be consistent to
+    /// [`offset`](#structfield.offset).
+    pub fn to_datetime_with_timezone<Tz: TimeZone>(&self, tz: &Tz) -> ParseResult<DateTime<Tz>> {
+        // if we have `timestamp` specified, guess an offset from that.
+        let mut guessed_offset = 0;
+        if let Some(timestamp) = self.timestamp {
+            // make a naive `DateTime` from given timestamp and (if any) nanosecond.
+            // an empty `nanosecond` is always equal to zero, so missing nanosecond is fine.
+            let nanosecond = self.nanosecond.unwrap_or(0);
+            let dt = NaiveDateTime::from_timestamp_opt(timestamp, nanosecond);
+            let dt = try!(dt.ok_or(OUT_OF_RANGE));
+
+            // we cannot handle offsets larger than i32 at all. give up if so.
+            // we can instead make `to_naive_datetime_with_offset` to accept i64, but this makes
+            // the algorithm too complex and tons of edge cases. i32 should be enough for all.
+            let offset = tz.offset_from_utc_datetime(&dt).local_minus_utc().num_seconds();
+            guessed_offset = try!(offset.to_i32().ok_or(OUT_OF_RANGE));
+        }
+
+        // checks if the given `DateTime` has a consistent `Offset` with given `self.offset`.
+        let check_offset = |dt: &DateTime<Tz>| {
+            if let Some(offset) = self.offset {
+                let delta = dt.offset().local_minus_utc().num_seconds();
+                // if `delta` does not fit in `i32`, it cannot equal to `self.offset` anyway.
+                delta.to_i32() == Some(offset)
+            } else {
+                true
+            }
+        };
+
+        // `guessed_offset` should be correct when `self.timestamp` is given.
+        // it will be 0 otherwise, but this is fine as the algorithm ignores offset for that case.
+ let datetime = try!(self.to_naive_datetime_with_offset(guessed_offset)); + match tz.from_local_datetime(&datetime) { + LocalResult::None => Err(IMPOSSIBLE), + LocalResult::Single(t) => if check_offset(&t) {Ok(t)} else {Err(IMPOSSIBLE)}, + LocalResult::Ambiguous(min, max) => { + // try to disambiguate two possible local dates by offset. + match (check_offset(&min), check_offset(&max)) { + (false, false) => Err(IMPOSSIBLE), + (false, true) => Ok(max), + (true, false) => Ok(min), + (true, true) => Err(NOT_ENOUGH), + } + } + } + } +} + +#[cfg(test)] +mod tests { + use super::Parsed; + use super::super::{OUT_OF_RANGE, IMPOSSIBLE, NOT_ENOUGH}; + use Datelike; + use Weekday::*; + use naive::date::{self, NaiveDate}; + use naive::time::NaiveTime; + use offset::TimeZone; + use offset::utc::UTC; + use offset::fixed::FixedOffset; + + #[test] + fn test_parsed_set_fields() { + // year*, isoyear* + let mut p = Parsed::new(); + assert_eq!(p.set_year(1987), Ok(())); + assert_eq!(p.set_year(1986), Err(IMPOSSIBLE)); + assert_eq!(p.set_year(1988), Err(IMPOSSIBLE)); + assert_eq!(p.set_year(1987), Ok(())); + assert_eq!(p.set_year_div_100(20), Ok(())); // independent to `year` + assert_eq!(p.set_year_div_100(21), Err(IMPOSSIBLE)); + assert_eq!(p.set_year_div_100(19), Err(IMPOSSIBLE)); + assert_eq!(p.set_year_mod_100(37), Ok(())); // ditto + assert_eq!(p.set_year_mod_100(38), Err(IMPOSSIBLE)); + assert_eq!(p.set_year_mod_100(36), Err(IMPOSSIBLE)); + + let mut p = Parsed::new(); + assert_eq!(p.set_year(0), Ok(())); + assert_eq!(p.set_year_div_100(0), Ok(())); + assert_eq!(p.set_year_mod_100(0), Ok(())); + + let mut p = Parsed::new(); + assert_eq!(p.set_year_div_100(-1), Err(OUT_OF_RANGE)); + assert_eq!(p.set_year_mod_100(-1), Err(OUT_OF_RANGE)); + assert_eq!(p.set_year(-1), Ok(())); + assert_eq!(p.set_year(-2), Err(IMPOSSIBLE)); + assert_eq!(p.set_year(0), Err(IMPOSSIBLE)); + + let mut p = Parsed::new(); + assert_eq!(p.set_year_div_100(0x1_0000_0008), Err(OUT_OF_RANGE)); + 
assert_eq!(p.set_year_div_100(8), Ok(())); + assert_eq!(p.set_year_div_100(0x1_0000_0008), Err(OUT_OF_RANGE)); + + // month, week*, isoweek, ordinal, day, minute, second, nanosecond, offset + let mut p = Parsed::new(); + assert_eq!(p.set_month(7), Ok(())); + assert_eq!(p.set_month(1), Err(IMPOSSIBLE)); + assert_eq!(p.set_month(6), Err(IMPOSSIBLE)); + assert_eq!(p.set_month(8), Err(IMPOSSIBLE)); + assert_eq!(p.set_month(12), Err(IMPOSSIBLE)); + + let mut p = Parsed::new(); + assert_eq!(p.set_month(8), Ok(())); + assert_eq!(p.set_month(0x1_0000_0008), Err(OUT_OF_RANGE)); + + // hour + let mut p = Parsed::new(); + assert_eq!(p.set_hour(12), Ok(())); + assert_eq!(p.set_hour(11), Err(IMPOSSIBLE)); + assert_eq!(p.set_hour(13), Err(IMPOSSIBLE)); + assert_eq!(p.set_hour(12), Ok(())); + assert_eq!(p.set_ampm(false), Err(IMPOSSIBLE)); + assert_eq!(p.set_ampm(true), Ok(())); + assert_eq!(p.set_hour12(12), Ok(())); + assert_eq!(p.set_hour12(0), Err(OUT_OF_RANGE)); // requires canonical representation + assert_eq!(p.set_hour12(1), Err(IMPOSSIBLE)); + assert_eq!(p.set_hour12(11), Err(IMPOSSIBLE)); + + let mut p = Parsed::new(); + assert_eq!(p.set_ampm(true), Ok(())); + assert_eq!(p.set_hour12(7), Ok(())); + assert_eq!(p.set_hour(7), Err(IMPOSSIBLE)); + assert_eq!(p.set_hour(18), Err(IMPOSSIBLE)); + assert_eq!(p.set_hour(19), Ok(())); + + // timestamp + let mut p = Parsed::new(); + assert_eq!(p.set_timestamp(1_234_567_890), Ok(())); + assert_eq!(p.set_timestamp(1_234_567_889), Err(IMPOSSIBLE)); + assert_eq!(p.set_timestamp(1_234_567_891), Err(IMPOSSIBLE)); + } + + #[test] + fn test_parsed_to_naive_date() { + macro_rules! 
parse { + ($($k:ident: $v:expr),*) => ( + Parsed { $($k: Some($v),)* ..Parsed::new() }.to_naive_date() + ) + } + + let ymd = |y,m,d| Ok(NaiveDate::from_ymd(y, m, d)); + + // ymd: omission of fields + assert_eq!(parse!(), Err(NOT_ENOUGH)); + assert_eq!(parse!(year: 1984), Err(NOT_ENOUGH)); + assert_eq!(parse!(year: 1984, month: 1), Err(NOT_ENOUGH)); + assert_eq!(parse!(year: 1984, month: 1, day: 2), ymd(1984, 1, 2)); + assert_eq!(parse!(year: 1984, day: 2), Err(NOT_ENOUGH)); + assert_eq!(parse!(year_div_100: 19), Err(NOT_ENOUGH)); + assert_eq!(parse!(year_div_100: 19, year_mod_100: 84), Err(NOT_ENOUGH)); + assert_eq!(parse!(year_div_100: 19, year_mod_100: 84, month: 1), Err(NOT_ENOUGH)); + assert_eq!(parse!(year_div_100: 19, year_mod_100: 84, month: 1, day: 2), ymd(1984, 1, 2)); + assert_eq!(parse!(year_div_100: 19, year_mod_100: 84, day: 2), Err(NOT_ENOUGH)); + assert_eq!(parse!(year_div_100: 19, month: 1, day: 2), Err(NOT_ENOUGH)); + assert_eq!(parse!(year_mod_100: 70, month: 1, day: 2), ymd(1970, 1, 2)); + assert_eq!(parse!(year_mod_100: 69, month: 1, day: 2), ymd(2069, 1, 2)); + + // ymd: out-of-range conditions + assert_eq!(parse!(year_div_100: 19, year_mod_100: 84, month: 2, day: 29), + ymd(1984, 2, 29)); + assert_eq!(parse!(year_div_100: 19, year_mod_100: 83, month: 2, day: 29), + Err(OUT_OF_RANGE)); + assert_eq!(parse!(year_div_100: 19, year_mod_100: 83, month: 13, day: 1), + Err(OUT_OF_RANGE)); + assert_eq!(parse!(year_div_100: 19, year_mod_100: 83, month: 12, day: 31), + ymd(1983, 12, 31)); + assert_eq!(parse!(year_div_100: 19, year_mod_100: 83, month: 12, day: 32), + Err(OUT_OF_RANGE)); + assert_eq!(parse!(year_div_100: 19, year_mod_100: 83, month: 12, day: 0), + Err(OUT_OF_RANGE)); + assert_eq!(parse!(year_div_100: 19, year_mod_100: 100, month: 1, day: 1), + Err(OUT_OF_RANGE)); + assert_eq!(parse!(year_div_100: 19, year_mod_100: -1, month: 1, day: 1), + Err(OUT_OF_RANGE)); + assert_eq!(parse!(year_div_100: 0, year_mod_100: 0, month: 1, day: 1), + ymd(0, 
1, 1)); + assert_eq!(parse!(year_div_100: -1, year_mod_100: 42, month: 1, day: 1), + Err(OUT_OF_RANGE)); + let max_year = date::MAX.year(); + assert_eq!(parse!(year_div_100: max_year / 100, + year_mod_100: max_year % 100, month: 1, day: 1), + ymd(max_year, 1, 1)); + assert_eq!(parse!(year_div_100: (max_year + 1) / 100, + year_mod_100: (max_year + 1) % 100, month: 1, day: 1), + Err(OUT_OF_RANGE)); + + // ymd: conflicting inputs + assert_eq!(parse!(year: 1984, year_div_100: 19, month: 1, day: 1), ymd(1984, 1, 1)); + assert_eq!(parse!(year: 1984, year_div_100: 20, month: 1, day: 1), Err(IMPOSSIBLE)); + assert_eq!(parse!(year: 1984, year_mod_100: 84, month: 1, day: 1), ymd(1984, 1, 1)); + assert_eq!(parse!(year: 1984, year_mod_100: 83, month: 1, day: 1), Err(IMPOSSIBLE)); + assert_eq!(parse!(year: 1984, year_div_100: 19, year_mod_100: 84, month: 1, day: 1), + ymd(1984, 1, 1)); + assert_eq!(parse!(year: 1984, year_div_100: 18, year_mod_100: 94, month: 1, day: 1), + Err(IMPOSSIBLE)); + assert_eq!(parse!(year: 1984, year_div_100: 18, year_mod_100: 184, month: 1, day: 1), + Err(OUT_OF_RANGE)); + assert_eq!(parse!(year: -1, year_div_100: 0, year_mod_100: -1, month: 1, day: 1), + Err(OUT_OF_RANGE)); + assert_eq!(parse!(year: -1, year_div_100: -1, year_mod_100: 99, month: 1, day: 1), + Err(OUT_OF_RANGE)); + assert_eq!(parse!(year: -1, year_div_100: 0, month: 1, day: 1), Err(OUT_OF_RANGE)); + assert_eq!(parse!(year: -1, year_mod_100: 99, month: 1, day: 1), Err(OUT_OF_RANGE)); + + // weekdates + assert_eq!(parse!(year: 2000, week_from_mon: 0), Err(NOT_ENOUGH)); + assert_eq!(parse!(year: 2000, week_from_sun: 0), Err(NOT_ENOUGH)); + assert_eq!(parse!(year: 2000, weekday: Sun), Err(NOT_ENOUGH)); + assert_eq!(parse!(year: 2000, week_from_mon: 0, weekday: Fri), Err(OUT_OF_RANGE)); + assert_eq!(parse!(year: 2000, week_from_sun: 0, weekday: Fri), Err(OUT_OF_RANGE)); + assert_eq!(parse!(year: 2000, week_from_mon: 0, weekday: Sat), ymd(2000, 1, 1)); + assert_eq!(parse!(year: 2000, 
week_from_sun: 0, weekday: Sat), ymd(2000, 1, 1)); + assert_eq!(parse!(year: 2000, week_from_mon: 0, weekday: Sun), ymd(2000, 1, 2)); + assert_eq!(parse!(year: 2000, week_from_sun: 1, weekday: Sun), ymd(2000, 1, 2)); + assert_eq!(parse!(year: 2000, week_from_mon: 1, weekday: Mon), ymd(2000, 1, 3)); + assert_eq!(parse!(year: 2000, week_from_sun: 1, weekday: Mon), ymd(2000, 1, 3)); + assert_eq!(parse!(year: 2000, week_from_mon: 1, weekday: Sat), ymd(2000, 1, 8)); + assert_eq!(parse!(year: 2000, week_from_sun: 1, weekday: Sat), ymd(2000, 1, 8)); + assert_eq!(parse!(year: 2000, week_from_mon: 1, weekday: Sun), ymd(2000, 1, 9)); + assert_eq!(parse!(year: 2000, week_from_sun: 2, weekday: Sun), ymd(2000, 1, 9)); + assert_eq!(parse!(year: 2000, week_from_mon: 2, weekday: Mon), ymd(2000, 1, 10)); + assert_eq!(parse!(year: 2000, week_from_sun: 52, weekday: Sat), ymd(2000, 12, 30)); + assert_eq!(parse!(year: 2000, week_from_sun: 53, weekday: Sun), ymd(2000, 12, 31)); + assert_eq!(parse!(year: 2000, week_from_sun: 53, weekday: Mon), Err(OUT_OF_RANGE)); + assert_eq!(parse!(year: 2000, week_from_sun: 0xffffffff, weekday: Mon), Err(OUT_OF_RANGE)); + assert_eq!(parse!(year: 2006, week_from_sun: 0, weekday: Sat), Err(OUT_OF_RANGE)); + assert_eq!(parse!(year: 2006, week_from_sun: 1, weekday: Sun), ymd(2006, 1, 1)); + + // weekdates: conflicting inputs + assert_eq!(parse!(year: 2000, week_from_mon: 1, week_from_sun: 1, weekday: Sat), + ymd(2000, 1, 8)); + assert_eq!(parse!(year: 2000, week_from_mon: 1, week_from_sun: 2, weekday: Sun), + ymd(2000, 1, 9)); + assert_eq!(parse!(year: 2000, week_from_mon: 1, week_from_sun: 1, weekday: Sun), + Err(IMPOSSIBLE)); + assert_eq!(parse!(year: 2000, week_from_mon: 2, week_from_sun: 2, weekday: Sun), + Err(IMPOSSIBLE)); + + // ISO weekdates + assert_eq!(parse!(isoyear: 2004, isoweek: 53), Err(NOT_ENOUGH)); + assert_eq!(parse!(isoyear: 2004, isoweek: 53, weekday: Fri), ymd(2004, 12, 31)); + assert_eq!(parse!(isoyear: 2004, isoweek: 53, weekday: 
Sat), ymd(2005, 1, 1)); + assert_eq!(parse!(isoyear: 2004, isoweek: 0xffffffff, weekday: Sat), Err(OUT_OF_RANGE)); + assert_eq!(parse!(isoyear: 2005, isoweek: 0, weekday: Thu), Err(OUT_OF_RANGE)); + assert_eq!(parse!(isoyear: 2005, isoweek: 5, weekday: Thu), ymd(2005, 2, 3)); + assert_eq!(parse!(isoyear: 2005, weekday: Thu), Err(NOT_ENOUGH)); + + // year and ordinal + assert_eq!(parse!(ordinal: 123), Err(NOT_ENOUGH)); + assert_eq!(parse!(year: 2000, ordinal: 0), Err(OUT_OF_RANGE)); + assert_eq!(parse!(year: 2000, ordinal: 1), ymd(2000, 1, 1)); + assert_eq!(parse!(year: 2000, ordinal: 60), ymd(2000, 2, 29)); + assert_eq!(parse!(year: 2000, ordinal: 61), ymd(2000, 3, 1)); + assert_eq!(parse!(year: 2000, ordinal: 366), ymd(2000, 12, 31)); + assert_eq!(parse!(year: 2000, ordinal: 367), Err(OUT_OF_RANGE)); + assert_eq!(parse!(year: 2000, ordinal: 0xffffffff), Err(OUT_OF_RANGE)); + assert_eq!(parse!(year: 2100, ordinal: 0), Err(OUT_OF_RANGE)); + assert_eq!(parse!(year: 2100, ordinal: 1), ymd(2100, 1, 1)); + assert_eq!(parse!(year: 2100, ordinal: 59), ymd(2100, 2, 28)); + assert_eq!(parse!(year: 2100, ordinal: 60), ymd(2100, 3, 1)); + assert_eq!(parse!(year: 2100, ordinal: 365), ymd(2100, 12, 31)); + assert_eq!(parse!(year: 2100, ordinal: 366), Err(OUT_OF_RANGE)); + assert_eq!(parse!(year: 2100, ordinal: 0xffffffff), Err(OUT_OF_RANGE)); + + // more complex cases + assert_eq!(parse!(year: 2014, month: 12, day: 31, ordinal: 365, isoyear: 2015, isoweek: 1, + week_from_sun: 52, week_from_mon: 52, weekday: Wed), + ymd(2014, 12, 31)); + assert_eq!(parse!(year: 2014, month: 12, ordinal: 365, isoyear: 2015, isoweek: 1, + week_from_sun: 52, week_from_mon: 52), + ymd(2014, 12, 31)); + assert_eq!(parse!(year: 2014, month: 12, day: 31, ordinal: 365, isoyear: 2014, isoweek: 53, + week_from_sun: 52, week_from_mon: 52, weekday: Wed), + Err(IMPOSSIBLE)); // no ISO week date 2014-W53-3 + assert_eq!(parse!(year: 2012, isoyear: 2015, isoweek: 1, + week_from_sun: 52, week_from_mon: 52), + 
Err(NOT_ENOUGH)); // ambiguous (2014-12-29, 2014-12-30, 2014-12-31) + assert_eq!(parse!(year_div_100: 20, isoyear_mod_100: 15, ordinal: 366), + Err(NOT_ENOUGH)); // technically unique (2014-12-31) but Chrono gives up + } + + #[test] + fn test_parsed_to_naive_time() { + macro_rules! parse { + ($($k:ident: $v:expr),*) => ( + Parsed { $($k: Some($v),)* ..Parsed::new() }.to_naive_time() + ) + } + + let hms = |h,m,s| Ok(NaiveTime::from_hms(h, m, s)); + let hmsn = |h,m,s,n| Ok(NaiveTime::from_hms_nano(h, m, s, n)); + + // omission of fields + assert_eq!(parse!(), Err(NOT_ENOUGH)); + assert_eq!(parse!(hour_div_12: 0), Err(NOT_ENOUGH)); + assert_eq!(parse!(hour_div_12: 0, hour_mod_12: 1), Err(NOT_ENOUGH)); + assert_eq!(parse!(hour_div_12: 0, hour_mod_12: 1, minute: 23), hms(1,23,0)); + assert_eq!(parse!(hour_div_12: 0, hour_mod_12: 1, minute: 23, second: 45), hms(1,23,45)); + assert_eq!(parse!(hour_div_12: 0, hour_mod_12: 1, minute: 23, second: 45, + nanosecond: 678_901_234), + hmsn(1,23,45,678_901_234)); + assert_eq!(parse!(hour_div_12: 1, hour_mod_12: 11, minute: 45, second: 6), hms(23,45,6)); + assert_eq!(parse!(hour_mod_12: 1, minute: 23), Err(NOT_ENOUGH)); + assert_eq!(parse!(hour_div_12: 0, hour_mod_12: 1, minute: 23, nanosecond: 456_789_012), + Err(NOT_ENOUGH)); + + // out-of-range conditions + assert_eq!(parse!(hour_div_12: 2, hour_mod_12: 0, minute: 0), Err(OUT_OF_RANGE)); + assert_eq!(parse!(hour_div_12: 1, hour_mod_12: 12, minute: 0), Err(OUT_OF_RANGE)); + assert_eq!(parse!(hour_div_12: 0, hour_mod_12: 1, minute: 60), Err(OUT_OF_RANGE)); + assert_eq!(parse!(hour_div_12: 0, hour_mod_12: 1, minute: 23, second: 61), + Err(OUT_OF_RANGE)); + assert_eq!(parse!(hour_div_12: 0, hour_mod_12: 1, minute: 23, second: 34, + nanosecond: 1_000_000_000), + Err(OUT_OF_RANGE)); + + // leap seconds + assert_eq!(parse!(hour_div_12: 0, hour_mod_12: 1, minute: 23, second: 60), + hmsn(1,23,59,1_000_000_000)); + assert_eq!(parse!(hour_div_12: 0, hour_mod_12: 1, minute: 23, second: 60, 
+ nanosecond: 999_999_999), + hmsn(1,23,59,1_999_999_999)); + } + + #[test] + fn test_parsed_to_naive_datetime_with_offset() { + macro_rules! parse { + (offset = $offset:expr; $($k:ident: $v:expr),*) => ( + Parsed { $($k: Some($v),)* ..Parsed::new() }.to_naive_datetime_with_offset($offset) + ); + ($($k:ident: $v:expr),*) => (parse!(offset = 0; $($k: $v),*)) + } + + let ymdhms = |y,m,d,h,n,s| Ok(NaiveDate::from_ymd(y, m, d).and_hms(h, n, s)); + let ymdhmsn = + |y,m,d,h,n,s,nano| Ok(NaiveDate::from_ymd(y, m, d).and_hms_nano(h, n, s, nano)); + + // omission of fields + assert_eq!(parse!(), Err(NOT_ENOUGH)); + assert_eq!(parse!(year: 2015, month: 1, day: 30, + hour_div_12: 1, hour_mod_12: 2, minute: 38), + ymdhms(2015,1,30, 14,38,0)); + assert_eq!(parse!(year: 1997, month: 1, day: 30, + hour_div_12: 1, hour_mod_12: 2, minute: 38, second: 5), + ymdhms(1997,1,30, 14,38,5)); + assert_eq!(parse!(year: 2012, ordinal: 34, hour_div_12: 0, hour_mod_12: 5, + minute: 6, second: 7, nanosecond: 890_123_456), + ymdhmsn(2012,2,3, 5,6,7,890_123_456)); + assert_eq!(parse!(timestamp: 0), ymdhms(1970,1,1, 0,0,0)); + assert_eq!(parse!(timestamp: 1, nanosecond: 0), ymdhms(1970,1,1, 0,0,1)); + assert_eq!(parse!(timestamp: 1, nanosecond: 1), Err(IMPOSSIBLE)); + assert_eq!(parse!(timestamp: 1_420_000_000), ymdhms(2014,12,31, 4,26,40)); + assert_eq!(parse!(timestamp: -0x1_0000_0000), ymdhms(1833,11,24, 17,31,44)); + + // full fields + assert_eq!(parse!(year: 2014, year_div_100: 20, year_mod_100: 14, month: 12, day: 31, + ordinal: 365, isoyear: 2015, isoyear_div_100: 20, isoyear_mod_100: 15, + isoweek: 1, week_from_sun: 52, week_from_mon: 52, weekday: Wed, + hour_div_12: 0, hour_mod_12: 4, minute: 26, second: 40, + nanosecond: 12_345_678, timestamp: 1_420_000_000), + ymdhmsn(2014,12,31, 4,26,40,12_345_678)); + assert_eq!(parse!(year: 2014, year_div_100: 20, year_mod_100: 14, month: 12, day: 31, + ordinal: 365, isoyear: 2015, isoyear_div_100: 20, isoyear_mod_100: 15, + isoweek: 1, 
week_from_sun: 52, week_from_mon: 52, weekday: Wed, + hour_div_12: 0, hour_mod_12: 4, minute: 26, second: 40, + nanosecond: 12_345_678, timestamp: 1_419_999_999), + Err(IMPOSSIBLE)); + assert_eq!(parse!(offset = 32400; + year: 2014, year_div_100: 20, year_mod_100: 14, month: 12, day: 31, + ordinal: 365, isoyear: 2015, isoyear_div_100: 20, isoyear_mod_100: 15, + isoweek: 1, week_from_sun: 52, week_from_mon: 52, weekday: Wed, + hour_div_12: 0, hour_mod_12: 4, minute: 26, second: 40, + nanosecond: 12_345_678, timestamp: 1_419_967_600), + ymdhmsn(2014,12,31, 4,26,40,12_345_678)); + + // more timestamps + let max_days_from_year_1970 = date::MAX - NaiveDate::from_ymd(1970,1,1); + let year_0_from_year_1970 = NaiveDate::from_ymd(0,1,1) - NaiveDate::from_ymd(1970,1,1); + let min_days_from_year_1970 = date::MIN - NaiveDate::from_ymd(1970,1,1); + assert_eq!(parse!(timestamp: min_days_from_year_1970.num_seconds()), + ymdhms(date::MIN.year(),1,1, 0,0,0)); + assert_eq!(parse!(timestamp: year_0_from_year_1970.num_seconds()), + ymdhms(0,1,1, 0,0,0)); + assert_eq!(parse!(timestamp: max_days_from_year_1970.num_seconds() + 86399), + ymdhms(date::MAX.year(),12,31, 23,59,59)); + + // leap seconds #1: partial fields + assert_eq!(parse!(second: 59, timestamp: 1_341_100_798), Err(IMPOSSIBLE)); + assert_eq!(parse!(second: 59, timestamp: 1_341_100_799), ymdhms(2012,6,30, 23,59,59)); + assert_eq!(parse!(second: 59, timestamp: 1_341_100_800), Err(IMPOSSIBLE)); + assert_eq!(parse!(second: 60, timestamp: 1_341_100_799), + ymdhmsn(2012,6,30, 23,59,59,1_000_000_000)); + assert_eq!(parse!(second: 60, timestamp: 1_341_100_800), + ymdhmsn(2012,6,30, 23,59,59,1_000_000_000)); + assert_eq!(parse!(second: 0, timestamp: 1_341_100_800), ymdhms(2012,7,1, 0,0,0)); + assert_eq!(parse!(second: 1, timestamp: 1_341_100_800), Err(IMPOSSIBLE)); + assert_eq!(parse!(second: 60, timestamp: 1_341_100_801), Err(IMPOSSIBLE)); + + // leap seconds #2: full fields + // we need to have separate tests for them since it 
uses another control flow. + assert_eq!(parse!(year: 2012, ordinal: 182, hour_div_12: 1, hour_mod_12: 11, + minute: 59, second: 59, timestamp: 1_341_100_798), + Err(IMPOSSIBLE)); + assert_eq!(parse!(year: 2012, ordinal: 182, hour_div_12: 1, hour_mod_12: 11, + minute: 59, second: 59, timestamp: 1_341_100_799), + ymdhms(2012,6,30, 23,59,59)); + assert_eq!(parse!(year: 2012, ordinal: 182, hour_div_12: 1, hour_mod_12: 11, + minute: 59, second: 59, timestamp: 1_341_100_800), + Err(IMPOSSIBLE)); + assert_eq!(parse!(year: 2012, ordinal: 182, hour_div_12: 1, hour_mod_12: 11, + minute: 59, second: 60, timestamp: 1_341_100_799), + ymdhmsn(2012,6,30, 23,59,59,1_000_000_000)); + assert_eq!(parse!(year: 2012, ordinal: 182, hour_div_12: 1, hour_mod_12: 11, + minute: 59, second: 60, timestamp: 1_341_100_800), + ymdhmsn(2012,6,30, 23,59,59,1_000_000_000)); + assert_eq!(parse!(year: 2012, ordinal: 183, hour_div_12: 0, hour_mod_12: 0, + minute: 0, second: 0, timestamp: 1_341_100_800), + ymdhms(2012,7,1, 0,0,0)); + assert_eq!(parse!(year: 2012, ordinal: 183, hour_div_12: 0, hour_mod_12: 0, + minute: 0, second: 1, timestamp: 1_341_100_800), + Err(IMPOSSIBLE)); + assert_eq!(parse!(year: 2012, ordinal: 182, hour_div_12: 1, hour_mod_12: 11, + minute: 59, second: 60, timestamp: 1_341_100_801), + Err(IMPOSSIBLE)); + + // error codes + assert_eq!(parse!(year: 2015, month: 1, day: 20, weekday: Tue, + hour_div_12: 2, hour_mod_12: 1, minute: 35, second: 20), + Err(OUT_OF_RANGE)); // `hour_div_12` is out of range + } + + #[test] + fn test_parsed_to_datetime() { + macro_rules! 
parse { + ($($k:ident: $v:expr),*) => ( + Parsed { $($k: Some($v),)* ..Parsed::new() }.to_datetime() + ) + } + + let ymdhmsn = |y,m,d,h,n,s,nano,off| Ok(FixedOffset::east(off).ymd(y, m, d) + .and_hms_nano(h, n, s, nano)); + + assert_eq!(parse!(offset: 0), Err(NOT_ENOUGH)); + assert_eq!(parse!(year: 2014, ordinal: 365, hour_div_12: 0, hour_mod_12: 4, + minute: 26, second: 40, nanosecond: 12_345_678), + Err(NOT_ENOUGH)); + assert_eq!(parse!(year: 2014, ordinal: 365, hour_div_12: 0, hour_mod_12: 4, + minute: 26, second: 40, nanosecond: 12_345_678, offset: 0), + ymdhmsn(2014,12,31, 4,26,40,12_345_678, 0)); + assert_eq!(parse!(year: 2014, ordinal: 365, hour_div_12: 1, hour_mod_12: 1, + minute: 26, second: 40, nanosecond: 12_345_678, offset: 32400), + ymdhmsn(2014,12,31, 13,26,40,12_345_678, 32400)); + assert_eq!(parse!(year: 2014, ordinal: 365, hour_div_12: 0, hour_mod_12: 1, + minute: 42, second: 4, nanosecond: 12_345_678, offset: -9876), + ymdhmsn(2014,12,31, 1,42,4,12_345_678, -9876)); + assert_eq!(parse!(year: 2015, ordinal: 1, hour_div_12: 0, hour_mod_12: 4, + minute: 26, second: 40, nanosecond: 12_345_678, offset: 86400), + Err(OUT_OF_RANGE)); // `FixedOffset` does not support such huge offset + } + + #[test] + fn test_parsed_to_datetime_with_timezone() { + macro_rules! 
parse { + ($tz:expr; $($k:ident: $v:expr),*) => ( + Parsed { $($k: Some($v),)* ..Parsed::new() }.to_datetime_with_timezone(&$tz) + ) + } + + // single result from ymdhms + assert_eq!(parse!(UTC; + year: 2014, ordinal: 365, hour_div_12: 0, hour_mod_12: 4, + minute: 26, second: 40, nanosecond: 12_345_678, offset: 0), + Ok(UTC.ymd(2014, 12, 31).and_hms_nano(4, 26, 40, 12_345_678))); + assert_eq!(parse!(UTC; + year: 2014, ordinal: 365, hour_div_12: 1, hour_mod_12: 1, + minute: 26, second: 40, nanosecond: 12_345_678, offset: 32400), + Err(IMPOSSIBLE)); + assert_eq!(parse!(FixedOffset::east(32400); + year: 2014, ordinal: 365, hour_div_12: 0, hour_mod_12: 4, + minute: 26, second: 40, nanosecond: 12_345_678, offset: 0), + Err(IMPOSSIBLE)); + assert_eq!(parse!(FixedOffset::east(32400); + year: 2014, ordinal: 365, hour_div_12: 1, hour_mod_12: 1, + minute: 26, second: 40, nanosecond: 12_345_678, offset: 32400), + Ok(FixedOffset::east(32400).ymd(2014, 12, 31) + .and_hms_nano(13, 26, 40, 12_345_678))); + + // single result from timestamp + assert_eq!(parse!(UTC; timestamp: 1_420_000_000, offset: 0), + Ok(UTC.ymd(2014, 12, 31).and_hms(4, 26, 40))); + assert_eq!(parse!(UTC; timestamp: 1_420_000_000, offset: 32400), + Err(IMPOSSIBLE)); + assert_eq!(parse!(FixedOffset::east(32400); timestamp: 1_420_000_000, offset: 0), + Err(IMPOSSIBLE)); + assert_eq!(parse!(FixedOffset::east(32400); timestamp: 1_420_000_000, offset: 32400), + Ok(FixedOffset::east(32400).ymd(2014, 12, 31).and_hms(13, 26, 40))); + + // TODO test with a variable time zone (for None and Ambiguous cases) + } +} + diff -Nru cargo-0.17.0/vendor/chrono-0.2.25/src/format/parse.rs cargo-0.19.0/vendor/chrono-0.2.25/src/format/parse.rs --- cargo-0.17.0/vendor/chrono-0.2.25/src/format/parse.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/chrono-0.2.25/src/format/parse.rs 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,720 @@ +// This is a part of rust-chrono. +// Copyright (c) 2015, Kang Seonghoon. 
+// Portions copyright (c) 2015, John Nagle. +// See README.md and LICENSE.txt for details. + +//! Date and time parsing routines. + +use std::usize; + +use Weekday; + +use super::scan; +use super::{Parsed, ParseResult, Item}; +use super::{OUT_OF_RANGE, INVALID, TOO_SHORT, TOO_LONG, BAD_FORMAT}; + +fn set_weekday_with_num_days_from_sunday(p: &mut Parsed, v: i64) -> ParseResult<()> { + p.set_weekday(match v { + 0 => Weekday::Sun, 1 => Weekday::Mon, 2 => Weekday::Tue, + 3 => Weekday::Wed, 4 => Weekday::Thu, 5 => Weekday::Fri, + 6 => Weekday::Sat, _ => return Err(OUT_OF_RANGE) + }) +} + +fn set_weekday_with_number_from_monday(p: &mut Parsed, v: i64) -> ParseResult<()> { + p.set_weekday(match v { + 1 => Weekday::Mon, 2 => Weekday::Tue, 3 => Weekday::Wed, + 4 => Weekday::Thu, 5 => Weekday::Fri, 6 => Weekday::Sat, + 7 => Weekday::Sun, _ => return Err(OUT_OF_RANGE) + }) +} + +fn parse_rfc2822<'a>(parsed: &mut Parsed, mut s: &'a str) -> ParseResult<(&'a str, ())> { + macro_rules! try_consume { + ($e:expr) => ({ let (s_, v) = try!($e); s = s_; v }) + } + + // an adapted RFC 2822 syntax from Section 3.3 and 4.3: + // + // date-time = [ day-of-week "," ] date 1*S time *S + // day-of-week = *S day-name *S + // day-name = "Mon" / "Tue" / "Wed" / "Thu" / "Fri" / "Sat" / "Sun" + // date = day month year + // day = *S 1*2DIGIT *S + // month = 1*S month-name 1*S + // month-name = "Jan" / "Feb" / "Mar" / "Apr" / "May" / "Jun" / + // "Jul" / "Aug" / "Sep" / "Oct" / "Nov" / "Dec" + // year = *S 2*DIGIT *S + // time = time-of-day 1*S zone + // time-of-day = hour ":" minute [ ":" second ] + // hour = *S 2DIGIT *S + // minute = *S 2DIGIT *S + // second = *S 2DIGIT *S + // zone = ( "+" / "-" ) 4DIGIT / + // "UT" / "GMT" / ; same to +0000 + // "EST" / "CST" / "MST" / "PST" / ; same to -0500 to -0800 + // "EDT" / "CDT" / "MDT" / "PDT" / ; same to -0400 to -0700 + // 1*(%d65-90 / %d97-122) ; same to -0000 + // + // some notes: + // + // - quoted characters can be in any mixture of lower and 
upper cases. + // + // - we do not recognize a folding white space (FWS) or comment (CFWS). + // for our purposes, instead, we accept any sequence of Unicode + // white space characters (denoted here to `S`). any actual RFC 2822 + // parser is expected to parse FWS and/or CFWS themselves and replace + // it with a single SP (`%x20`); this is legitimate. + // + // - two-digit year < 50 should be interpreted by adding 2000. + // two-digit year >= 50 or three-digit year should be interpreted + // by adding 1900. note that four-or-more-digit years less than 1000 + // are *never* affected by this rule. + // + // - zone of `-0000` and any unrecognized legacy time zones (including + // *every* one-letter military time zones) are considered "missing", + // in such that we don't actually know what time zone is being used. + // + // - mismatching day-of-week is always an error, which is consistent to + // Chrono's own rules. + // + // - zones can range from `-9959` to `+9959`, but `FixedOffset` does not + // support offsets larger than 24 hours. this is not *that* problematic + // since we do not directly go to a `DateTime` so one can recover + // the offset information from `Parsed` anyway. 
+ + s = s.trim_left(); + + if let Ok((s_, weekday)) = scan::short_weekday(s) { + if !s_.starts_with(",") { return Err(INVALID); } + s = &s_[1..]; + try!(parsed.set_weekday(weekday)); + } + + s = s.trim_left(); + try!(parsed.set_day(try_consume!(scan::number(s, 1, 2)))); + s = try!(scan::space(s)); // mandatory + try!(parsed.set_month(1 + try_consume!(scan::short_month0(s)) as i64)); + s = try!(scan::space(s)); // mandatory + + // distinguish two- and three-digit years from four-digit years + let prevlen = s.len(); + let mut year = try_consume!(scan::number(s, 2, usize::MAX)); + let yearlen = prevlen - s.len(); + match (yearlen, year) { + (2, 0...49) => { year += 2000; } // 47 -> 2047, 05 -> 2005 + (2, 50...99) => { year += 1900; } // 79 -> 1979 + (3, _) => { year += 1900; } // 112 -> 2012, 009 -> 1909 + (_, _) => {} // 1987 -> 1987, 0654 -> 0654 + } + try!(parsed.set_year(year)); + + s = try!(scan::space(s)); // mandatory + try!(parsed.set_hour(try_consume!(scan::number(s, 2, 2)))); + s = try!(scan::char(s.trim_left(), b':')).trim_left(); // *S ":" *S + try!(parsed.set_minute(try_consume!(scan::number(s, 2, 2)))); + s = s.trim_left(); + if !s.is_empty() { // [ ":" *S 2DIGIT ] + s = try!(scan::char(s, b':')).trim_left(); + try!(parsed.set_second(try_consume!(scan::number(s, 2, 2)))); + } + + s = try!(scan::space(s)); // mandatory + if let Some(offset) = try_consume!(scan::timezone_offset_2822(s)) { + // only set the offset when it is definitely known (i.e. not `-0000`) + try!(parsed.set_offset(offset as i64)); + } + + Ok((s, ())) +} + +fn parse_rfc3339<'a>(parsed: &mut Parsed, mut s: &'a str) -> ParseResult<(&'a str, ())> { + macro_rules! 
try_consume { + ($e:expr) => ({ let (s_, v) = try!($e); s = s_; v }) + } + + // an adapted RFC 3339 syntax from Section 5.6: + // + // date-fullyear = 4DIGIT + // date-month = 2DIGIT ; 01-12 + // date-mday = 2DIGIT ; 01-28, 01-29, 01-30, 01-31 based on month/year + // time-hour = 2DIGIT ; 00-23 + // time-minute = 2DIGIT ; 00-59 + // time-second = 2DIGIT ; 00-58, 00-59, 00-60 based on leap second rules + // time-secfrac = "." 1*DIGIT + // time-numoffset = ("+" / "-") time-hour ":" time-minute + // time-offset = "Z" / time-numoffset + // partial-time = time-hour ":" time-minute ":" time-second [time-secfrac] + // full-date = date-fullyear "-" date-month "-" date-mday + // full-time = partial-time time-offset + // date-time = full-date "T" full-time + // + // some notes: + // + // - quoted characters can be in any mixture of lower and upper cases. + // + // - it may accept any number of fractional digits for seconds. + // for Chrono, this means that we should skip digits past first 9 digits. + // + // - unlike RFC 2822, the valid offset ranges from -23:59 to +23:59. + // note that this restriction is unique to RFC 3339 and not ISO 8601. + // since this is not a typical Chrono behavior, we check it earlier. 
+ + try!(parsed.set_year(try_consume!(scan::number(s, 4, 4)))); + s = try!(scan::char(s, b'-')); + try!(parsed.set_month(try_consume!(scan::number(s, 2, 2)))); + s = try!(scan::char(s, b'-')); + try!(parsed.set_day(try_consume!(scan::number(s, 2, 2)))); + + s = match s.as_bytes().first() { + Some(&b't') | Some(&b'T') => &s[1..], + Some(_) => return Err(INVALID), + None => return Err(TOO_SHORT), + }; + + try!(parsed.set_hour(try_consume!(scan::number(s, 2, 2)))); + s = try!(scan::char(s, b':')); + try!(parsed.set_minute(try_consume!(scan::number(s, 2, 2)))); + s = try!(scan::char(s, b':')); + try!(parsed.set_second(try_consume!(scan::number(s, 2, 2)))); + if s.starts_with(".") { + let nanosecond = try_consume!(scan::nanosecond(&s[1..])); + try!(parsed.set_nanosecond(nanosecond)); + } + + let offset = try_consume!(scan::timezone_offset_zulu(s, |s| scan::char(s, b':'))); + if offset <= -86400 || offset >= 86400 { return Err(OUT_OF_RANGE); } + try!(parsed.set_offset(offset as i64)); + + Ok((s, ())) +} + +/// Tries to parse given string into `parsed` with given formatting items. +/// Returns `Ok` when the entire string has been parsed (otherwise `parsed` should not be used). +/// There should be no trailing string after parsing; +/// use a stray [`Item::Space`](./enum.Item.html#variant.Space) to trim whitespaces. +/// +/// This particular date and time parser is: +/// +/// - Greedy. It will consume the longest possible prefix. +/// For example, `April` is always consumed entirely when the long month name is requested; +/// it equally accepts `Apr`, but prefers the longer prefix in this case. +/// +/// - Padding-agnostic (for numeric items). +/// The [`Pad`](./enum.Pad.html) field is completely ignored, +/// so one can prepend any number of whitespace then any number of zeroes before numbers. +/// +/// - (Still) obeying the intrinsic parsing width. This allows, for example, parsing `HHMMSS`. 
+pub fn parse<'a, I>(parsed: &mut Parsed, mut s: &str, items: I) -> ParseResult<()> + where I: Iterator> { + macro_rules! try_consume { + ($e:expr) => ({ let (s_, v) = try!($e); s = s_; v }) + } + + for item in items { + match item { + Item::Literal(prefix) => { + if s.len() < prefix.len() { return Err(TOO_SHORT); } + if !s.starts_with(prefix) { return Err(INVALID); } + s = &s[prefix.len()..]; + } + + Item::Space(_) => { + s = s.trim_left(); + } + + Item::Numeric(spec, _pad) => { + use super::Numeric::*; + + let (width, signed, set): (usize, bool, + fn(&mut Parsed, i64) -> ParseResult<()>) = match spec { + Year => (4, true, Parsed::set_year), + YearDiv100 => (2, false, Parsed::set_year_div_100), + YearMod100 => (2, false, Parsed::set_year_mod_100), + IsoYear => (4, true, Parsed::set_isoyear), + IsoYearDiv100 => (2, false, Parsed::set_isoyear_div_100), + IsoYearMod100 => (2, false, Parsed::set_isoyear_mod_100), + Month => (2, false, Parsed::set_month), + Day => (2, false, Parsed::set_day), + WeekFromSun => (2, false, Parsed::set_week_from_sun), + WeekFromMon => (2, false, Parsed::set_week_from_mon), + IsoWeek => (2, false, Parsed::set_isoweek), + NumDaysFromSun => (1, false, set_weekday_with_num_days_from_sunday), + WeekdayFromMon => (1, false, set_weekday_with_number_from_monday), + Ordinal => (3, false, Parsed::set_ordinal), + Hour => (2, false, Parsed::set_hour), + Hour12 => (2, false, Parsed::set_hour12), + Minute => (2, false, Parsed::set_minute), + Second => (2, false, Parsed::set_second), + Nanosecond => (9, false, Parsed::set_nanosecond), + Timestamp => (usize::MAX, false, Parsed::set_timestamp), + }; + + s = s.trim_left(); + let v = if signed { + if s.starts_with("-") { + let v = try_consume!(scan::number(&s[1..], 1, usize::MAX)); + try!(0i64.checked_sub(v).ok_or(OUT_OF_RANGE)) + } else if s.starts_with("+") { + try_consume!(scan::number(&s[1..], 1, usize::MAX)) + } else { + // if there is no explicit sign, we respect the original `width` + 
try_consume!(scan::number(s, 1, width)) + } + } else { + try_consume!(scan::number(s, 1, width)) + }; + try!(set(parsed, v)); + } + + Item::Fixed(spec) => { + use super::Fixed::*; + + match spec { + ShortMonthName => { + let month0 = try_consume!(scan::short_month0(s)); + try!(parsed.set_month(month0 as i64 + 1)); + } + + LongMonthName => { + let month0 = try_consume!(scan::short_or_long_month0(s)); + try!(parsed.set_month(month0 as i64 + 1)); + } + + ShortWeekdayName => { + let weekday = try_consume!(scan::short_weekday(s)); + try!(parsed.set_weekday(weekday)); + } + + LongWeekdayName => { + let weekday = try_consume!(scan::short_or_long_weekday(s)); + try!(parsed.set_weekday(weekday)); + } + + LowerAmPm | UpperAmPm => { + if s.len() < 2 { return Err(TOO_SHORT); } + let ampm = match (s.as_bytes()[0] | 32, s.as_bytes()[1] | 32) { + (b'a',b'm') => false, + (b'p',b'm') => true, + _ => return Err(INVALID) + }; + try!(parsed.set_ampm(ampm)); + s = &s[2..]; + } + + Nanosecond | Nanosecond3 | Nanosecond6 | Nanosecond9=> { + if s.starts_with(".") { + let nano = try_consume!(scan::nanosecond(&s[1..])); + try!(parsed.set_nanosecond(nano)); + } + } + + TimezoneName => return Err(BAD_FORMAT), + + TimezoneOffsetColon | TimezoneOffset => { + let offset = try_consume!(scan::timezone_offset(s.trim_left(), + scan::colon_or_space)); + try!(parsed.set_offset(offset as i64)); + } + + TimezoneOffsetColonZ | TimezoneOffsetZ => { + let offset = try_consume!(scan::timezone_offset_zulu(s.trim_left(), + scan::colon_or_space)); + try!(parsed.set_offset(offset as i64)); + } + + RFC2822 => try_consume!(parse_rfc2822(parsed, s)), + RFC3339 => try_consume!(parse_rfc3339(parsed, s)), + } + } + + Item::Error => { + return Err(BAD_FORMAT); + } + } + } + + // if there are trailling chars, it is an error + if !s.is_empty() { + Err(TOO_LONG) + } else { + Ok(()) + } +} + +#[cfg(test)] +#[test] +fn test_parse() { + use super::*; + use super::IMPOSSIBLE; + + // workaround for Rust issue #22255 + fn 
parse_all(s: &str, items: &[Item]) -> ParseResult { + let mut parsed = Parsed::new(); + try!(parse(&mut parsed, s, items.iter().cloned())); + Ok(parsed) + } + + macro_rules! check { + ($fmt:expr, $items:expr; $err:tt) => ( + assert_eq!(parse_all($fmt, &$items), Err($err)) + ); + ($fmt:expr, $items:expr; $($k:ident: $v:expr),*) => ( + assert_eq!(parse_all($fmt, &$items), Ok(Parsed { $($k: Some($v),)* ..Parsed::new() })) + ); + } + + // empty string + check!("", []; ); + check!(" ", []; TOO_LONG); + check!("a", []; TOO_LONG); + + // whitespaces + check!("", [sp!("")]; ); + check!(" ", [sp!("")]; ); + check!("\t", [sp!("")]; ); + check!(" \n\r \n", [sp!("")]; ); + check!("a", [sp!("")]; TOO_LONG); + + // literal + check!("", [lit!("a")]; TOO_SHORT); + check!(" ", [lit!("a")]; INVALID); + check!("a", [lit!("a")]; ); + check!("aa", [lit!("a")]; TOO_LONG); + check!("A", [lit!("a")]; INVALID); + check!("xy", [lit!("xy")]; ); + check!("xy", [lit!("x"), lit!("y")]; ); + check!("x y", [lit!("x"), lit!("y")]; INVALID); + check!("xy", [lit!("x"), sp!(""), lit!("y")]; ); + check!("x y", [lit!("x"), sp!(""), lit!("y")]; ); + + // numeric + check!("1987", [num!(Year)]; year: 1987); + check!("1987 ", [num!(Year)]; TOO_LONG); + check!("0x12", [num!(Year)]; TOO_LONG); // `0` is parsed + check!("x123", [num!(Year)]; INVALID); + check!("2015", [num!(Year)]; year: 2015); + check!("0000", [num!(Year)]; year: 0); + check!("9999", [num!(Year)]; year: 9999); + check!(" \t987", [num!(Year)]; year: 987); + check!("5", [num!(Year)]; year: 5); + check!("5\0", [num!(Year)]; TOO_LONG); + check!("\05", [num!(Year)]; INVALID); + check!("", [num!(Year)]; TOO_SHORT); + check!("12345", [num!(Year), lit!("5")]; year: 1234); + check!("12345", [nums!(Year), lit!("5")]; year: 1234); + check!("12345", [num0!(Year), lit!("5")]; year: 1234); + check!("12341234", [num!(Year), num!(Year)]; year: 1234); + check!("1234 1234", [num!(Year), num!(Year)]; year: 1234); + check!("1234 1235", [num!(Year), num!(Year)]; 
IMPOSSIBLE); + check!("1234 1234", [num!(Year), lit!("x"), num!(Year)]; INVALID); + check!("1234x1234", [num!(Year), lit!("x"), num!(Year)]; year: 1234); + check!("1234xx1234", [num!(Year), lit!("x"), num!(Year)]; INVALID); + check!("1234 x 1234", [num!(Year), lit!("x"), num!(Year)]; INVALID); + + // signed numeric + check!("-42", [num!(Year)]; year: -42); + check!("+42", [num!(Year)]; year: 42); + check!("-0042", [num!(Year)]; year: -42); + check!("+0042", [num!(Year)]; year: 42); + check!("-42195", [num!(Year)]; year: -42195); + check!("+42195", [num!(Year)]; year: 42195); + check!(" -42195", [num!(Year)]; year: -42195); + check!(" +42195", [num!(Year)]; year: 42195); + check!(" - 42", [num!(Year)]; INVALID); + check!(" + 42", [num!(Year)]; INVALID); + check!("-", [num!(Year)]; TOO_SHORT); + check!("+", [num!(Year)]; TOO_SHORT); + + // unsigned numeric + check!("345", [num!(Ordinal)]; ordinal: 345); + check!("+345", [num!(Ordinal)]; INVALID); + check!("-345", [num!(Ordinal)]; INVALID); + check!(" 345", [num!(Ordinal)]; ordinal: 345); + check!(" +345", [num!(Ordinal)]; INVALID); + check!(" -345", [num!(Ordinal)]; INVALID); + + // various numeric fields + check!("1234 5678", + [num!(Year), num!(IsoYear)]; + year: 1234, isoyear: 5678); + check!("12 34 56 78", + [num!(YearDiv100), num!(YearMod100), num!(IsoYearDiv100), num!(IsoYearMod100)]; + year_div_100: 12, year_mod_100: 34, isoyear_div_100: 56, isoyear_mod_100: 78); + check!("1 2 3 4 5 6", + [num!(Month), num!(Day), num!(WeekFromSun), num!(WeekFromMon), num!(IsoWeek), + num!(NumDaysFromSun)]; + month: 1, day: 2, week_from_sun: 3, week_from_mon: 4, isoweek: 5, weekday: Weekday::Sat); + check!("7 89 01", + [num!(WeekdayFromMon), num!(Ordinal), num!(Hour12)]; + weekday: Weekday::Sun, ordinal: 89, hour_mod_12: 1); + check!("23 45 6 78901234 567890123", + [num!(Hour), num!(Minute), num!(Second), num!(Nanosecond), num!(Timestamp)]; + hour_div_12: 1, hour_mod_12: 11, minute: 45, second: 6, nanosecond: 78_901_234, + 
timestamp: 567_890_123); + + // fixed: month and weekday names + check!("apr", [fix!(ShortMonthName)]; month: 4); + check!("Apr", [fix!(ShortMonthName)]; month: 4); + check!("APR", [fix!(ShortMonthName)]; month: 4); + check!("ApR", [fix!(ShortMonthName)]; month: 4); + check!("April", [fix!(ShortMonthName)]; TOO_LONG); // `Apr` is parsed + check!("A", [fix!(ShortMonthName)]; TOO_SHORT); + check!("Sol", [fix!(ShortMonthName)]; INVALID); + check!("Apr", [fix!(LongMonthName)]; month: 4); + check!("Apri", [fix!(LongMonthName)]; TOO_LONG); // `Apr` is parsed + check!("April", [fix!(LongMonthName)]; month: 4); + check!("Aprill", [fix!(LongMonthName)]; TOO_LONG); + check!("Aprill", [fix!(LongMonthName), lit!("l")]; month: 4); + check!("Aprl", [fix!(LongMonthName), lit!("l")]; month: 4); + check!("April", [fix!(LongMonthName), lit!("il")]; TOO_SHORT); // do not backtrack + check!("thu", [fix!(ShortWeekdayName)]; weekday: Weekday::Thu); + check!("Thu", [fix!(ShortWeekdayName)]; weekday: Weekday::Thu); + check!("THU", [fix!(ShortWeekdayName)]; weekday: Weekday::Thu); + check!("tHu", [fix!(ShortWeekdayName)]; weekday: Weekday::Thu); + check!("Thursday", [fix!(ShortWeekdayName)]; TOO_LONG); // `Thu` is parsed + check!("T", [fix!(ShortWeekdayName)]; TOO_SHORT); + check!("The", [fix!(ShortWeekdayName)]; INVALID); + check!("Nop", [fix!(ShortWeekdayName)]; INVALID); + check!("Thu", [fix!(LongWeekdayName)]; weekday: Weekday::Thu); + check!("Thur", [fix!(LongWeekdayName)]; TOO_LONG); // `Thu` is parsed + check!("Thurs", [fix!(LongWeekdayName)]; TOO_LONG); // ditto + check!("Thursday", [fix!(LongWeekdayName)]; weekday: Weekday::Thu); + check!("Thursdays", [fix!(LongWeekdayName)]; TOO_LONG); + check!("Thursdays", [fix!(LongWeekdayName), lit!("s")]; weekday: Weekday::Thu); + check!("Thus", [fix!(LongWeekdayName), lit!("s")]; weekday: Weekday::Thu); + check!("Thursday", [fix!(LongWeekdayName), lit!("rsday")]; TOO_SHORT); // do not backtrack + + // fixed: am/pm + check!("am", 
[fix!(LowerAmPm)]; hour_div_12: 0); + check!("pm", [fix!(LowerAmPm)]; hour_div_12: 1); + check!("AM", [fix!(LowerAmPm)]; hour_div_12: 0); + check!("PM", [fix!(LowerAmPm)]; hour_div_12: 1); + check!("am", [fix!(UpperAmPm)]; hour_div_12: 0); + check!("pm", [fix!(UpperAmPm)]; hour_div_12: 1); + check!("AM", [fix!(UpperAmPm)]; hour_div_12: 0); + check!("PM", [fix!(UpperAmPm)]; hour_div_12: 1); + check!("Am", [fix!(LowerAmPm)]; hour_div_12: 0); + check!(" Am", [fix!(LowerAmPm)]; INVALID); + check!("ame", [fix!(LowerAmPm)]; TOO_LONG); // `am` is parsed + check!("a", [fix!(LowerAmPm)]; TOO_SHORT); + check!("p", [fix!(LowerAmPm)]; TOO_SHORT); + check!("x", [fix!(LowerAmPm)]; TOO_SHORT); + check!("xx", [fix!(LowerAmPm)]; INVALID); + check!("", [fix!(LowerAmPm)]; TOO_SHORT); + + // fixed: dot plus nanoseconds + check!("", [fix!(Nanosecond)]; ); // no field set, but not an error + check!("4", [fix!(Nanosecond)]; TOO_LONG); // never consumes `4` + check!("4", [fix!(Nanosecond), num!(Second)]; second: 4); + check!(".0", [fix!(Nanosecond)]; nanosecond: 0); + check!(".4", [fix!(Nanosecond)]; nanosecond: 400_000_000); + check!(".42", [fix!(Nanosecond)]; nanosecond: 420_000_000); + check!(".421", [fix!(Nanosecond)]; nanosecond: 421_000_000); + check!(".42195", [fix!(Nanosecond)]; nanosecond: 421_950_000); + check!(".421950803", [fix!(Nanosecond)]; nanosecond: 421_950_803); + check!(".421950803547", [fix!(Nanosecond)]; nanosecond: 421_950_803); + check!(".000000003547", [fix!(Nanosecond)]; nanosecond: 3); + check!(".000000000547", [fix!(Nanosecond)]; nanosecond: 0); + check!(".", [fix!(Nanosecond)]; TOO_SHORT); + check!(".4x", [fix!(Nanosecond)]; TOO_LONG); + check!(". 
4", [fix!(Nanosecond)]; INVALID); + check!(" .4", [fix!(Nanosecond)]; TOO_LONG); // no automatic trimming + + // fixed: timezone offsets + check!("+00:00", [fix!(TimezoneOffset)]; offset: 0); + check!("-00:00", [fix!(TimezoneOffset)]; offset: 0); + check!("+00:01", [fix!(TimezoneOffset)]; offset: 60); + check!("-00:01", [fix!(TimezoneOffset)]; offset: -60); + check!("+00:30", [fix!(TimezoneOffset)]; offset: 30 * 60); + check!("-00:30", [fix!(TimezoneOffset)]; offset: -30 * 60); + check!("+04:56", [fix!(TimezoneOffset)]; offset: 296 * 60); + check!("-04:56", [fix!(TimezoneOffset)]; offset: -296 * 60); + check!("+24:00", [fix!(TimezoneOffset)]; offset: 24 * 60 * 60); + check!("-24:00", [fix!(TimezoneOffset)]; offset: -24 * 60 * 60); + check!("+99:59", [fix!(TimezoneOffset)]; offset: (100 * 60 - 1) * 60); + check!("-99:59", [fix!(TimezoneOffset)]; offset: -(100 * 60 - 1) * 60); + check!("+00:59", [fix!(TimezoneOffset)]; offset: 59 * 60); + check!("+00:60", [fix!(TimezoneOffset)]; OUT_OF_RANGE); + check!("+00:99", [fix!(TimezoneOffset)]; OUT_OF_RANGE); + check!("#12:34", [fix!(TimezoneOffset)]; INVALID); + check!("12:34", [fix!(TimezoneOffset)]; INVALID); + check!("+12:34 ", [fix!(TimezoneOffset)]; TOO_LONG); + check!(" +12:34", [fix!(TimezoneOffset)]; offset: 754 * 60); + check!("\t -12:34", [fix!(TimezoneOffset)]; offset: -754 * 60); + check!("", [fix!(TimezoneOffset)]; TOO_SHORT); + check!("+", [fix!(TimezoneOffset)]; TOO_SHORT); + check!("+1", [fix!(TimezoneOffset)]; TOO_SHORT); + check!("+12", [fix!(TimezoneOffset)]; TOO_SHORT); + check!("+123", [fix!(TimezoneOffset)]; TOO_SHORT); + check!("+1234", [fix!(TimezoneOffset)]; offset: 754 * 60); + check!("+12345", [fix!(TimezoneOffset)]; TOO_LONG); + check!("+12345", [fix!(TimezoneOffset), num!(Day)]; offset: 754 * 60, day: 5); + check!("Z", [fix!(TimezoneOffset)]; INVALID); + check!("z", [fix!(TimezoneOffset)]; INVALID); + check!("Z", [fix!(TimezoneOffsetZ)]; offset: 0); + check!("z", [fix!(TimezoneOffsetZ)]; offset: 
0); + check!("Y", [fix!(TimezoneOffsetZ)]; INVALID); + check!("Zulu", [fix!(TimezoneOffsetZ), lit!("ulu")]; offset: 0); + check!("zulu", [fix!(TimezoneOffsetZ), lit!("ulu")]; offset: 0); + check!("+1234ulu", [fix!(TimezoneOffsetZ), lit!("ulu")]; offset: 754 * 60); + check!("+12:34ulu", [fix!(TimezoneOffsetZ), lit!("ulu")]; offset: 754 * 60); + check!("???", [fix!(TimezoneName)]; BAD_FORMAT); // not allowed + + // some practical examples + check!("2015-02-04T14:37:05+09:00", + [num!(Year), lit!("-"), num!(Month), lit!("-"), num!(Day), lit!("T"), + num!(Hour), lit!(":"), num!(Minute), lit!(":"), num!(Second), fix!(TimezoneOffset)]; + year: 2015, month: 2, day: 4, hour_div_12: 1, hour_mod_12: 2, + minute: 37, second: 5, offset: 32400); + check!("Mon, 10 Jun 2013 09:32:37 GMT", + [fix!(ShortWeekdayName), lit!(","), sp!(" "), num!(Day), sp!(" "), + fix!(ShortMonthName), sp!(" "), num!(Year), sp!(" "), num!(Hour), lit!(":"), + num!(Minute), lit!(":"), num!(Second), sp!(" "), lit!("GMT")]; + year: 2013, month: 6, day: 10, weekday: Weekday::Mon, + hour_div_12: 0, hour_mod_12: 9, minute: 32, second: 37); + check!("20060102150405", + [num!(Year), num!(Month), num!(Day), num!(Hour), num!(Minute), num!(Second)]; + year: 2006, month: 1, day: 2, hour_div_12: 1, hour_mod_12: 3, minute: 4, second: 5); + check!("3:14PM", + [num!(Hour12), lit!(":"), num!(Minute), fix!(LowerAmPm)]; + hour_div_12: 1, hour_mod_12: 3, minute: 14); + check!("12345678901234.56789", + [num!(Timestamp), lit!("."), num!(Nanosecond)]; + nanosecond: 56_789, timestamp: 12_345_678_901_234); + check!("12345678901234.56789", + [num!(Timestamp), fix!(Nanosecond)]; + nanosecond: 567_890_000, timestamp: 12_345_678_901_234); +} + +#[cfg(test)] +#[test] +fn test_rfc2822() { + use datetime::DateTime; + use offset::fixed::FixedOffset; + use super::*; + use super::NOT_ENOUGH; + + // Test data - (input, Ok(expected result after parse and format) or Err(error code)) + let testdates = [ + ("Tue, 20 Jan 2015 17:35:20 -0800", 
Ok("Tue, 20 Jan 2015 17:35:20 -0800")), // normal case + ("20 Jan 2015 17:35:20 -0800", Ok("Tue, 20 Jan 2015 17:35:20 -0800")), // no day of week + ("20 JAN 2015 17:35:20 -0800", Ok("Tue, 20 Jan 2015 17:35:20 -0800")), // upper case month + ("11 Sep 2001 09:45:00 EST", Ok("Tue, 11 Sep 2001 09:45:00 -0500")), + ("30 Feb 2015 17:35:20 -0800", Err(OUT_OF_RANGE)), // bad day of month + ("Tue, 20 Jan 2015", Err(TOO_SHORT)), // omitted fields + ("Tue, 20 Avr 2015 17:35:20 -0800", Err(INVALID)), // bad month name + ("Tue, 20 Jan 2015 25:35:20 -0800", Err(OUT_OF_RANGE)), // bad hour + ("Tue, 20 Jan 2015 7:35:20 -0800", Err(INVALID)), // bad # of digits in hour + ("Tue, 20 Jan 2015 17:65:20 -0800", Err(OUT_OF_RANGE)), // bad minute + ("Tue, 20 Jan 2015 17:35:90 -0800", Err(OUT_OF_RANGE)), // bad second + ("Tue, 20 Jan 2015 17:35:20 -0890", Err(OUT_OF_RANGE)), // bad offset + ("6 Jun 1944 04:00:00Z", Err(INVALID)), // bad offset (zulu not allowed) + ("Tue, 20 Jan 2015 17:35:20 HAS", Err(NOT_ENOUGH)) // bad named time zone + ]; + + fn rfc2822_to_datetime(date: &str) -> ParseResult> { + let mut parsed = Parsed::new(); + try!(parse(&mut parsed, date, [Item::Fixed(Fixed::RFC2822)].iter().cloned())); + parsed.to_datetime() + } + + fn fmt_rfc2822_datetime(dt: DateTime) -> String { + dt.format_with_items([Item::Fixed(Fixed::RFC2822)].iter().cloned()).to_string() + } + + // Test against test data above + for &(date, checkdate) in testdates.iter() { + let d = rfc2822_to_datetime(date); // parse a date + let dt = match d { // did we get a value? 
+ Ok(dt) => Ok(fmt_rfc2822_datetime(dt)), // yes, go on + Err(e) => Err(e), // otherwise keep an error for the comparison + }; + if dt != checkdate.map(|s| s.to_string()) { // check for expected result + panic!("Date conversion failed for {}\nReceived: {:?}\nExpected: {:?}", + date, dt, checkdate); + } + }; +} + + + +#[cfg(test)] +#[test] +fn parse_rfc850() { + use ::{UTC, TimeZone}; + + static RFC850_FMT: &'static str = "%A, %d-%b-%y %T GMT"; + + let dt_str = "Sunday, 06-Nov-94 08:49:37 GMT"; + let dt = UTC.ymd(1994, 11, 6).and_hms(8, 49, 37); + + // Check that the format is what we expect + assert_eq!(dt.format(RFC850_FMT).to_string(), dt_str); + + // Check that it parses correctly + assert_eq!(Ok(dt), UTC.datetime_from_str("Sunday, 06-Nov-94 08:49:37 GMT", RFC850_FMT)); + + // Check that the rest of the weekdays parse correctly (this test originally failed because + // Sunday parsed incorrectly). + let testdates = [ + (UTC.ymd(1994, 11, 7).and_hms(8, 49, 37), "Monday, 07-Nov-94 08:49:37 GMT"), + (UTC.ymd(1994, 11, 8).and_hms(8, 49, 37), "Tuesday, 08-Nov-94 08:49:37 GMT"), + (UTC.ymd(1994, 11, 9).and_hms(8, 49, 37), "Wednesday, 09-Nov-94 08:49:37 GMT"), + (UTC.ymd(1994, 11, 10).and_hms(8, 49, 37), "Thursday, 10-Nov-94 08:49:37 GMT"), + (UTC.ymd(1994, 11, 11).and_hms(8, 49, 37), "Friday, 11-Nov-94 08:49:37 GMT"), + (UTC.ymd(1994, 11, 12).and_hms(8, 49, 37), "Saturday, 12-Nov-94 08:49:37 GMT"), + ]; + + for val in &testdates { + assert_eq!(Ok(val.0), UTC.datetime_from_str(val.1, RFC850_FMT)); + } +} + +#[cfg(test)] +#[test] +fn test_rfc3339() { + use datetime::DateTime; + use offset::fixed::FixedOffset; + use super::*; + + // Test data - (input, Ok(expected result after parse and format) or Err(error code)) + let testdates = [ + ("2015-01-20T17:35:20-08:00", Ok("2015-01-20T17:35:20-08:00")), // normal case + ("1944-06-06T04:04:00Z", Ok("1944-06-06T04:04:00+00:00")), // D-day + ("2001-09-11T09:45:00-08:00", Ok("2001-09-11T09:45:00-08:00")), + 
("2015-01-20T17:35:20.001-08:00", Ok("2015-01-20T17:35:20.001-08:00")), + ("2015-01-20T17:35:20.000031-08:00", Ok("2015-01-20T17:35:20.000031-08:00")), + ("2015-01-20T17:35:20.000000004-08:00", Ok("2015-01-20T17:35:20.000000004-08:00")), + ("2015-01-20T17:35:20.000000000452-08:00", Ok("2015-01-20T17:35:20-08:00")), // too small + ("2015-02-30T17:35:20-08:00", Err(OUT_OF_RANGE)), // bad day of month + ("2015-01-20T25:35:20-08:00", Err(OUT_OF_RANGE)), // bad hour + ("2015-01-20T17:65:20-08:00", Err(OUT_OF_RANGE)), // bad minute + ("2015-01-20T17:35:90-08:00", Err(OUT_OF_RANGE)), // bad second + ("2015-01-20T17:35:20-24:00", Err(OUT_OF_RANGE)), // bad offset + ]; + + fn rfc3339_to_datetime(date: &str) -> ParseResult> { + let mut parsed = Parsed::new(); + try!(parse(&mut parsed, date, [Item::Fixed(Fixed::RFC3339)].iter().cloned())); + parsed.to_datetime() + } + + fn fmt_rfc3339_datetime(dt: DateTime) -> String { + dt.format_with_items([Item::Fixed(Fixed::RFC3339)].iter().cloned()).to_string() + } + + // Test against test data above + for &(date, checkdate) in testdates.iter() { + let d = rfc3339_to_datetime(date); // parse a date + let dt = match d { // did we get a value? + Ok(dt) => Ok(fmt_rfc3339_datetime(dt)), // yes, go on + Err(e) => Err(e), // otherwise keep an error for the comparison + }; + if dt != checkdate.map(|s| s.to_string()) { // check for expected result + panic!("Date conversion failed for {}\nReceived: {:?}\nExpected: {:?}", + date, dt, checkdate); + } + }; +} + diff -Nru cargo-0.17.0/vendor/chrono-0.2.25/src/format/scan.rs cargo-0.19.0/vendor/chrono-0.2.25/src/format/scan.rs --- cargo-0.17.0/vendor/chrono-0.2.25/src/format/scan.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/chrono-0.2.25/src/format/scan.rs 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,262 @@ +// This is a part of rust-chrono. +// Copyright (c) 2015, Kang Seonghoon. +// See README.md and LICENSE.txt for details. + +/*! + * Various scanning routines for the parser. 
+ */ + +use Weekday; +use super::{ParseResult, TOO_SHORT, INVALID, OUT_OF_RANGE}; + +/// Returns true when two slices are equal case-insensitively (in ASCII). +/// Assumes that the `pattern` is already converted to lower case. +fn equals(s: &str, pattern: &str) -> bool { + let mut xs = s.as_bytes().iter().map(|&c| match c { b'A'...b'Z' => c + 32, _ => c }); + let mut ys = pattern.as_bytes().iter().cloned(); + loop { + match (xs.next(), ys.next()) { + (None, None) => return true, + (None, _) | (_, None) => return false, + (Some(x), Some(y)) if x != y => return false, + _ => (), + } + } +} + +/// Tries to parse the non-negative number from `min` to `max` digits. +/// +/// The absence of digits at all is an unconditional error. +/// More than `max` digits are consumed up to the first `max` digits. +/// Any number that does not fit in `i64` is an error. +pub fn number(s: &str, min: usize, max: usize) -> ParseResult<(&str, i64)> { + assert!(min <= max); + + // limit `s` to given number of digits + let mut window = s.as_bytes(); + if window.len() > max { window = &window[..max]; } + + // scan digits + let upto = window.iter().position(|&c| c < b'0' || b'9' < c).unwrap_or(window.len()); + if upto < min { + return Err(if window.is_empty() {TOO_SHORT} else {INVALID}); + } + + // we can overflow here, which is the only possible cause of error from `parse`. + let v: i64 = try!(s[..upto].parse().map_err(|_| OUT_OF_RANGE)); + Ok((&s[upto..], v)) +} + +/// Tries to consume at least one digits as a fractional second. +/// Returns the number of whole nanoseconds (0--999,999,999). +pub fn nanosecond(s: &str) -> ParseResult<(&str, i64)> { + // record the number of digits consumed for later scaling. + let origlen = s.len(); + let (s, v) = try!(number(s, 1, 9)); + let consumed = origlen - s.len(); + + // scale the number accordingly. 
+ static SCALE: [i64; 10] = [0, 100_000_000, 10_000_000, 1_000_000, 100_000, 10_000, + 1_000, 100, 10, 1]; + let v = try!(v.checked_mul(SCALE[consumed]).ok_or(OUT_OF_RANGE)); + + // if there are more than 9 digits, skip next digits. + let s = s.trim_left_matches(|c: char| '0' <= c && c <= '9'); + + Ok((s, v)) +} + +/// Tries to parse the month index (0 through 11) with the first three ASCII letters. +pub fn short_month0(s: &str) -> ParseResult<(&str, u8)> { + if s.len() < 3 { return Err(TOO_SHORT); } + let buf = s.as_bytes(); + let month0 = match (buf[0] | 32, buf[1] | 32, buf[2] | 32) { + (b'j',b'a',b'n') => 0, + (b'f',b'e',b'b') => 1, + (b'm',b'a',b'r') => 2, + (b'a',b'p',b'r') => 3, + (b'm',b'a',b'y') => 4, + (b'j',b'u',b'n') => 5, + (b'j',b'u',b'l') => 6, + (b'a',b'u',b'g') => 7, + (b's',b'e',b'p') => 8, + (b'o',b'c',b't') => 9, + (b'n',b'o',b'v') => 10, + (b'd',b'e',b'c') => 11, + _ => return Err(INVALID) + }; + Ok((&s[3..], month0)) +} + +/// Tries to parse the weekday with the first three ASCII letters. +pub fn short_weekday(s: &str) -> ParseResult<(&str, Weekday)> { + if s.len() < 3 { return Err(TOO_SHORT); } + let buf = s.as_bytes(); + let weekday = match (buf[0] | 32, buf[1] | 32, buf[2] | 32) { + (b'm',b'o',b'n') => Weekday::Mon, + (b't',b'u',b'e') => Weekday::Tue, + (b'w',b'e',b'd') => Weekday::Wed, + (b't',b'h',b'u') => Weekday::Thu, + (b'f',b'r',b'i') => Weekday::Fri, + (b's',b'a',b't') => Weekday::Sat, + (b's',b'u',b'n') => Weekday::Sun, + _ => return Err(INVALID) + }; + Ok((&s[3..], weekday)) +} + +/// Tries to parse the month index (0 through 11) with short or long month names. +/// It prefers long month names to short month names when both are possible. 
+pub fn short_or_long_month0(s: &str) -> ParseResult<(&str, u8)> { + // lowercased month names, minus first three chars + static LONG_MONTH_SUFFIXES: [&'static str; 12] = + ["uary", "ruary", "ch", "il", "", "e", "y", "ust", "tember", "ober", "ember", "ember"]; + + let (mut s, month0) = try!(short_month0(s)); + + // tries to consume the suffix if possible + let suffix = LONG_MONTH_SUFFIXES[month0 as usize]; + if s.len() >= suffix.len() && equals(&s[..suffix.len()], suffix) { + s = &s[suffix.len()..]; + } + + Ok((s, month0)) +} + +/// Tries to parse the weekday with short or long weekday names. +/// It prefers long weekday names to short weekday names when both are possible. +pub fn short_or_long_weekday(s: &str) -> ParseResult<(&str, Weekday)> { + // lowercased weekday names, minus first three chars + static LONG_WEEKDAY_SUFFIXES: [&'static str; 7] = + ["day", "sday", "nesday", "rsday", "day", "urday", "day"]; + + let (mut s, weekday) = try!(short_weekday(s)); + + // tries to consume the suffix if possible + let suffix = LONG_WEEKDAY_SUFFIXES[weekday.num_days_from_monday() as usize]; + if s.len() >= suffix.len() && equals(&s[..suffix.len()], suffix) { + s = &s[suffix.len()..]; + } + + Ok((s, weekday)) +} + +/// Tries to consume exactly one given character. +pub fn char(s: &str, c1: u8) -> ParseResult<&str> { + match s.as_bytes().first() { + Some(&c) if c == c1 => Ok(&s[1..]), + Some(_) => Err(INVALID), + None => Err(TOO_SHORT), + } +} + +/// Tries to consume one or more whitespace. +pub fn space(s: &str) -> ParseResult<&str> { + let s_ = s.trim_left(); + if s_.len() < s.len() { + Ok(s_) + } else if s.is_empty() { + Err(TOO_SHORT) + } else { + Err(INVALID) + } +} + +/// Consumes any number (including zero) of colon or spaces. +pub fn colon_or_space(s: &str) -> ParseResult<&str> { + Ok(s.trim_left_matches(|c: char| c == ':' || c.is_whitespace())) +} + +/// Tries to parse `[-+]\d\d` continued by `\d\d`. Return an offset in seconds if possible. 
+/// +/// The additional `colon` may be used to parse a mandatory or optional `:` +/// between hours and minutes, and should return either a new suffix or `Err` when parsing fails. +pub fn timezone_offset(mut s: &str, mut colon: F) -> ParseResult<(&str, i32)> + where F: FnMut(&str) -> ParseResult<&str> { + fn digits(s: &str) -> ParseResult<(u8, u8)> { + let b = s.as_bytes(); + if b.len() < 2 { + Err(TOO_SHORT) + } else { + Ok((b[0], b[1])) + } + } + let negative = match s.as_bytes().first() { + Some(&b'+') => false, + Some(&b'-') => true, + Some(_) => return Err(INVALID), + None => return Err(TOO_SHORT), + }; + s = &s[1..]; + + // hours (00--99) + let hours = match try!(digits(s)) { + (h1 @ b'0'...b'9', h2 @ b'0'...b'9') => ((h1 - b'0') * 10 + (h2 - b'0')) as i32, + _ => return Err(INVALID), + }; + s = &s[2..]; + + // colons (and possibly other separators) + s = try!(colon(s)); + + // minutes (00--59) + let minutes = match try!(digits(s)) { + (m1 @ b'0'...b'5', m2 @ b'0'...b'9') => ((m1 - b'0') * 10 + (m2 - b'0')) as i32, + (b'6'...b'9', b'0'...b'9') => return Err(OUT_OF_RANGE), + _ => return Err(INVALID), + }; + s = &s[2..]; + + let seconds = hours * 3600 + minutes * 60; + Ok((s, if negative {-seconds} else {seconds})) +} + +/// Same to `timezone_offset` but also allows for `z`/`Z` which is same to `+00:00`. +pub fn timezone_offset_zulu(s: &str, colon: F) -> ParseResult<(&str, i32)> + where F: FnMut(&str) -> ParseResult<&str> { + match s.as_bytes().first() { + Some(&b'z') | Some(&b'Z') => Ok((&s[1..], 0)), + _ => timezone_offset(s, colon), + } +} + +/// Same to `timezone_offset` but also allows for RFC 2822 legacy timezones. +/// May return `None` which indicates an insufficient offset data (i.e. `-0000`). 
+pub fn timezone_offset_2822(s: &str) -> ParseResult<(&str, Option)> { + // tries to parse legacy time zone names + let upto = s.as_bytes().iter().position(|&c| match c { b'a'...b'z' | b'A'...b'Z' => false, + _ => true }).unwrap_or(s.len()); + if upto > 0 { + let name = &s[..upto]; + let s = &s[upto..]; + if equals(name, "gmt") || equals(name, "ut") { + Ok((s, Some(0))) + } else if equals(name, "est") { + Ok((s, Some(-5 * 3600))) + } else if equals(name, "edt") { + Ok((s, Some(-4 * 3600))) + } else if equals(name, "cst") { + Ok((s, Some(-6 * 3600))) + } else if equals(name, "cdt") { + Ok((s, Some(-5 * 3600))) + } else if equals(name, "mst") { + Ok((s, Some(-7 * 3600))) + } else if equals(name, "mdt") { + Ok((s, Some(-6 * 3600))) + } else if equals(name, "pst") { + Ok((s, Some(-8 * 3600))) + } else if equals(name, "pdt") { + Ok((s, Some(-7 * 3600))) + } else { + Ok((s, None)) // recommended by RFC 2822: consume but treat it as -0000 + } + } else { + let (s_, offset) = try!(timezone_offset(s, |s| Ok(s))); + if offset == 0 && s.starts_with("-") { // -0000 is not same to +0000 + Ok((s_, None)) + } else { + Ok((s_, Some(offset))) + } + } +} + diff -Nru cargo-0.17.0/vendor/chrono-0.2.25/src/format/strftime.rs cargo-0.19.0/vendor/chrono-0.2.25/src/format/strftime.rs --- cargo-0.17.0/vendor/chrono-0.2.25/src/format/strftime.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/chrono-0.2.25/src/format/strftime.rs 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,443 @@ +// This is a part of rust-chrono. +// Copyright (c) 2015, Kang Seonghoon. +// See README.md and LICENSE.txt for details. + +/*! +`strftime`/`strptime`-inspired date and time formatting syntax. + +## Specifiers + +The following specifiers are available both to formatting and parsing. + +Spec. | Example | Description +----- | ------------- | ----------- + | | **DATE SPECIFIERS:** +`%Y` | `2001` | The full proleptic Gregorian year, zero-padded to 4 digits. 
[1] +`%C` | `20` | The proleptic Gregorian year divided by 100, zero-padded to 2 digits. [2] +`%y` | `01` | The proleptic Gregorian year modulo 100, zero-padded to 2 digits. [2] + | | +`%m` | `07` | Month number (01--12), zero-padded to 2 digits. +`%b` | `Jul` | Abbreviated month name. Always 3 letters. +`%B` | `July` | Full month name. Also accepts corresponding abbreviation in parsing. +`%h` | `Jul` | Same to `%b`. + | | +`%d` | `08` | Day number (01--31), zero-padded to 2 digits. +`%e` | ` 8` | Same to `%d` but space-padded. Same to `%_d`. + | | +`%a` | `Sun` | Abbreviated weekday name. Always 3 letters. +`%A` | `Sunday` | Full weekday name. Also accepts corresponding abbreviation in parsing. +`%w` | `0` | Sunday = 0, Monday = 1, ..., Saturday = 6. +`%u` | `7` | Monday = 1, Tuesday = 2, ..., Sunday = 7. (ISO 8601) + | | +`%U` | `28` | Week number starting with Sunday (00--53), zero-padded to 2 digits. [3] +`%W` | `27` | Same to `%U`, but week 1 starts with the first Monday in that year instead. + | | +`%G` | `2001` | Same to `%Y` but uses the year number in ISO 8601 week date. [4] +`%g` | `01` | Same to `%y` but uses the year number in ISO 8601 week date. [4] +`%V` | `27` | Same to `%U` but uses the week number in ISO 8601 week date (01--53). [4] + | | +`%j` | `189` | Day of the year (001--366), zero-padded to 3 digits. + | | +`%D` | `07/08/01` | Month-day-year format. Same to `%m/%d/%y`. +`%x` | `07/08/01` | Same to `%D`. +`%F` | `2001-07-08` | Year-month-day format (ISO 8601). Same to `%Y-%m-%d`. +`%v` | ` 8-Jul-2001` | Day-month-year format. Same to `%e-%b-%Y`. + | | + | | **TIME SPECIFIERS:** +`%H` | `00` | Hour number (00--23), zero-padded to 2 digits. +`%k` | ` 0` | Same to `%H` but space-padded. Same to `%_H`. +`%I` | `12` | Hour number in 12-hour clocks (01--12), zero-padded to 2 digits. +`%l` | `12` | Same to `%I` but space-padded. Same to `%_I`. + | | +`%P` | `am` | `am` or `pm` in 12-hour clocks. +`%p` | `AM` | `AM` or `PM` in 12-hour clocks. 
+ | | +`%M` | `34` | Minute number (00--59), zero-padded to 2 digits. +`%S` | `60` | Second number (00--60), zero-padded to 2 digits. [5] +`%f` | `026490000` | The fractional seconds (in nanoseconds) since last whole second. [8] +`%.f` | `.026490` | Similar to `.%f` but left-aligned. [8] +`%.3f`| `.026` | Similar to `.%f` but left-aligned but fixed to a length of 3. [8] +`%.6f`| `.026490` | Similar to `.%f` but left-aligned but fixed to a length of 6. [8] +`%.9f`| `.026490000` | Similar to `.%f` but left-aligned but fixed to a length of 9. [8] + | | +`%R` | `00:34` | Hour-minute format. Same to `%H:%M`. +`%T` | `00:34:60` | Hour-minute-second format. Same to `%H:%M:%S`. +`%X` | `00:34:60` | Same to `%T`. +`%r` | `12:34:60 AM` | Hour-minute-second format in 12-hour clocks. Same to `%I:%M:%S %p`. + | | + | | **TIME ZONE SPECIFIERS:** +`%Z` | `ACST` | *Formatting only:* Local time zone name. +`%z` | `+0930` | Offset from the local time to UTC (with UTC being `+0000`). +`%:z` | `+09:30` | Same to `%z` but with a colon. + | | + | | **DATE & TIME SPECIFIERS:** +`%c` | `Sun Jul 8 00:34:60 2001` | `ctime` date & time format. Same to `%a %b %e %T %Y` sans `\n`. +`%+` | `2001-07-08T00:34:60.026490+09:30` | ISO 8601 / RFC 3339 date & time format. [6] + | | +`%s` | `994518299` | UNIX timestamp, the number of seconds since 1970-01-01 00:00 UTC. [7] + | | + | | **SPECIAL SPECIFIERS:** +`%t` | | Literal tab (`\t`). +`%n` | | Literal newline (`\n`). +`%%` | | Literal percent sign. + +It is possible to override the default padding behavior of numeric specifiers `%?`. +This is not allowed for other specifiers and will result in the `BAD_FORMAT` error. + +Modifier | Description +-------- | ----------- +`%-?` | Suppresses any padding including spaces and zeroes. (e.g. `%j` = `012`, `%-j` = `12`) +`%_?` | Uses spaces as a padding. (e.g. `%j` = `012`, `%_j` = ` 12`) +`%0?` | Uses zeroes as a padding. (e.g. `%e` = ` 9`, `%0e` = `09`) + +Notes: + +1. 
`%Y`: + Negative years are allowed in formatting but not in parsing. + +2. `%C`, `%y`: + This is floor division, so 100 BCE (year number -99) will print `-1` and `99` respectively. + +3. `%U`: + Week 1 starts with the first Sunday in that year. + It is possible to have week 0 for days before the first Sunday. + +4. `%G`, `%g`, `%V`: + Week 1 is the first week with at least 4 days in that year. + Week 0 does not exist, so this should be used with `%G` or `%g`. + +5. `%S`: + It accounts for leap seconds, so `60` is possible. + +6. `%+`: + Same to `%Y-%m-%dT%H:%M:%S%.f%:z`, + i.e. 0, 3, 6 or 9 fractional digits for seconds and colons in the time zone offset. + + The typical `strftime` implementations have + different (and locale-dependent) formats for this specifier. + While Chrono's format for `%+` is far more stable, + it is best to avoid this specifier if you want to control the exact output. + +7. `%s`: + This is not padded and can be negative. + For the purpose of Chrono, it only accounts for non-leap seconds + so it slightly differs from ISO C `strftime` behavior. + +8. `%f`, `%.f`, `%.3f`, `%.6f`, `%.9f`: + + The default `%f` is right-aligned and always zero-padded to 9 digits + for the compatibility with glibc and others, + so it always counts the number of nanoseconds since the last whole second. + E.g. 7ms after the last second will print `007000000`, + and parsing `7000000` will yield the same. + + The variant `%.f` is left-aligned and print 0, 3, 6 or 9 fractional digits + according to the precision. + E.g. 70ms after the last second under `%.f` will print `.070` (note: not `.07`), + and parsing `.07`, `.070000` etc. will yield the same. + Note that they can print or read nothing if the fractional part is zero or + the next character is not `.`. + + The variant `%.3f`, `%.6f` and `%.9f` are left-aligned and print 3, 6 or 9 fractional digits + according to the number preceding `f`. + E.g. 
70ms after the last second under `%.3f` will print `.070` (note: not `.07`), + and parsing `.07`, `.070000` etc. will yield the same. + Note that they can read nothing if the fractional part is zero or + the next character is not `.` however will print with the specified length. + +*/ + +use super::{Item, Numeric, Fixed, Pad}; + +/// Parsing iterator for `strftime`-like format strings. +#[derive(Clone)] +pub struct StrftimeItems<'a> { + /// Remaining portion of the string. + remainder: &'a str, + /// If the current specifier is composed of multiple formatting items (e.g. `%+`), + /// parser refers to the statically reconstructed slice of them. + /// If `recons` is not empty they have to be returned earlier than the `remainder`. + recons: &'static [Item<'static>], +} + +impl<'a> StrftimeItems<'a> { + /// Creates a new parsing iterator from the `strftime`-like format string. + pub fn new(s: &'a str) -> StrftimeItems<'a> { + static FMT_NONE: [Item<'static>; 0] = []; + StrftimeItems { remainder: s, recons: &FMT_NONE } + } +} + +impl<'a> Iterator for StrftimeItems<'a> { + type Item = Item<'a>; + + fn next(&mut self) -> Option> { + // we have some reconstructed items to return + if !self.recons.is_empty() { + let item = self.recons[0]; + self.recons = &self.recons[1..]; + return Some(item); + } + + match self.remainder.chars().next() { + // we are done + None => return None, + + // the next item is a specifier + Some('%') => { + self.remainder = &self.remainder[1..]; + + macro_rules! next { + () => ( + match self.remainder.chars().next() { + Some(x) => { + self.remainder = &self.remainder[x.len_utf8()..]; + x + }, + None => return Some(Item::Error), // premature end of string + } + ) + } + + let spec = next!(); + let pad_override = match spec { + '-' => Some(Pad::None), + '0' => Some(Pad::Zero), + '_' => Some(Pad::Space), + _ => None, + }; + let spec = if pad_override.is_some() { next!() } else { spec }; + + macro_rules! 
recons { + [$head:expr, $($tail:expr),+] => ({ + const RECONS: &'static [Item<'static>] = &[$($tail),+]; + self.recons = RECONS; + $head + }) + } + + let item = match spec { + 'A' => fix!(LongWeekdayName), + 'B' => fix!(LongMonthName), + 'C' => num0!(YearDiv100), + 'D' => recons![num0!(Month), lit!("/"), num0!(Day), lit!("/"), + num0!(YearMod100)], + 'F' => recons![num0!(Year), lit!("-"), num0!(Month), lit!("-"), num0!(Day)], + 'G' => num0!(IsoYear), + 'H' => num0!(Hour), + 'I' => num0!(Hour12), + 'M' => num0!(Minute), + 'P' => fix!(LowerAmPm), + 'R' => recons![num0!(Hour), lit!(":"), num0!(Minute)], + 'S' => num0!(Second), + 'T' => recons![num0!(Hour), lit!(":"), num0!(Minute), lit!(":"), num0!(Second)], + 'U' => num0!(WeekFromSun), + 'V' => num0!(IsoWeek), + 'W' => num0!(WeekFromMon), + 'X' => recons![num0!(Hour), lit!(":"), num0!(Minute), lit!(":"), num0!(Second)], + 'Y' => num0!(Year), + 'Z' => fix!(TimezoneName), + 'a' => fix!(ShortWeekdayName), + 'b' => fix!(ShortMonthName), + 'c' => recons![fix!(ShortWeekdayName), sp!(" "), fix!(ShortMonthName), + sp!(" "), nums!(Day), sp!(" "), num0!(Hour), lit!(":"), + num0!(Minute), lit!(":"), num0!(Second), sp!(" "), num0!(Year)], + 'd' => num0!(Day), + 'e' => nums!(Day), + 'f' => num0!(Nanosecond), + 'g' => num0!(IsoYearMod100), + 'h' => fix!(ShortMonthName), + 'j' => num0!(Ordinal), + 'k' => nums!(Hour), + 'l' => nums!(Hour12), + 'm' => num0!(Month), + 'n' => sp!("\n"), + 'p' => fix!(UpperAmPm), + 'r' => recons![num0!(Hour12), lit!(":"), num0!(Minute), lit!(":"), + num0!(Second), sp!(" "), fix!(UpperAmPm)], + 's' => num!(Timestamp), + 't' => sp!("\t"), + 'u' => num!(WeekdayFromMon), + 'v' => recons![nums!(Day), lit!("-"), fix!(ShortMonthName), lit!("-"), + num0!(Year)], + 'w' => num!(NumDaysFromSun), + 'x' => recons![num0!(Month), lit!("/"), num0!(Day), lit!("/"), + num0!(YearMod100)], + 'y' => num0!(YearMod100), + 'z' => fix!(TimezoneOffset), + '+' => fix!(RFC3339), + ':' => match next!() { + 'z' => 
fix!(TimezoneOffsetColon), + _ => Item::Error, + }, + '.' => match next!() { + '3' => match next!() { + 'f' => fix!(Nanosecond3), + _ => Item::Error, + }, + '6' => match next!() { + 'f' => fix!(Nanosecond6), + _ => Item::Error, + }, + '9' => match next!() { + 'f' => fix!(Nanosecond9), + _ => Item::Error, + }, + 'f' => fix!(Nanosecond), + _ => Item::Error, + }, + '%' => lit!("%"), + _ => Item::Error, // no such specifier + }; + + // adjust `item` if we have any padding modifier + if let Some(new_pad) = pad_override { + match item { + Item::Numeric(kind, _pad) if self.recons.is_empty() => + Some(Item::Numeric(kind, new_pad)), + _ => Some(Item::Error), // no reconstructed or non-numeric item allowed + } + } else { + Some(item) + } + }, + + // the next item is space + Some(c) if c.is_whitespace() => { + // `%` is not a whitespace, so `c != '%'` is redundant + let nextspec = self.remainder.find(|c: char| !c.is_whitespace()) + .unwrap_or(self.remainder.len()); + assert!(nextspec > 0); + let item = sp!(&self.remainder[..nextspec]); + self.remainder = &self.remainder[nextspec..]; + Some(item) + }, + + // the next item is literal + _ => { + let nextspec = self.remainder.find(|c: char| c.is_whitespace() || c == '%') + .unwrap_or(self.remainder.len()); + assert!(nextspec > 0); + let item = lit!(&self.remainder[..nextspec]); + self.remainder = &self.remainder[nextspec..]; + Some(item) + }, + } + } +} + +#[cfg(test)] +#[test] +fn test_strftime_items() { + fn parse_and_collect<'a>(s: &'a str) -> Vec> { + // map any error into `[Item::Error]`. useful for easy testing. 
+ let items = StrftimeItems::new(s); + let items = items.map(|spec| if spec == Item::Error {None} else {Some(spec)}); + items.collect::>>().unwrap_or(vec![Item::Error]) + } + + assert_eq!(parse_and_collect(""), []); + assert_eq!(parse_and_collect(" \t\n\r "), [sp!(" \t\n\r ")]); + assert_eq!(parse_and_collect("hello?"), [lit!("hello?")]); + assert_eq!(parse_and_collect("a b\t\nc"), [lit!("a"), sp!(" "), lit!("b"), sp!("\t\n"), + lit!("c")]); + assert_eq!(parse_and_collect("100%%"), [lit!("100"), lit!("%")]); + assert_eq!(parse_and_collect("100%% ok"), [lit!("100"), lit!("%"), sp!(" "), lit!("ok")]); + assert_eq!(parse_and_collect("%%PDF-1.0"), [lit!("%"), lit!("PDF-1.0")]); + assert_eq!(parse_and_collect("%Y-%m-%d"), [num0!(Year), lit!("-"), num0!(Month), lit!("-"), + num0!(Day)]); + assert_eq!(parse_and_collect("[%F]"), parse_and_collect("[%Y-%m-%d]")); + assert_eq!(parse_and_collect("%m %d"), [num0!(Month), sp!(" "), num0!(Day)]); + assert_eq!(parse_and_collect("%"), [Item::Error]); + assert_eq!(parse_and_collect("%%"), [lit!("%")]); + assert_eq!(parse_and_collect("%%%"), [Item::Error]); + assert_eq!(parse_and_collect("%%%%"), [lit!("%"), lit!("%")]); + assert_eq!(parse_and_collect("foo%?"), [Item::Error]); + assert_eq!(parse_and_collect("bar%42"), [Item::Error]); + assert_eq!(parse_and_collect("quux% +"), [Item::Error]); + assert_eq!(parse_and_collect("%.Z"), [Item::Error]); + assert_eq!(parse_and_collect("%:Z"), [Item::Error]); + assert_eq!(parse_and_collect("%-Z"), [Item::Error]); + assert_eq!(parse_and_collect("%0Z"), [Item::Error]); + assert_eq!(parse_and_collect("%_Z"), [Item::Error]); + assert_eq!(parse_and_collect("%.j"), [Item::Error]); + assert_eq!(parse_and_collect("%:j"), [Item::Error]); + assert_eq!(parse_and_collect("%-j"), [num!(Ordinal)]); + assert_eq!(parse_and_collect("%0j"), [num0!(Ordinal)]); + assert_eq!(parse_and_collect("%_j"), [nums!(Ordinal)]); + assert_eq!(parse_and_collect("%.e"), [Item::Error]); + assert_eq!(parse_and_collect("%:e"), 
[Item::Error]); + assert_eq!(parse_and_collect("%-e"), [num!(Day)]); + assert_eq!(parse_and_collect("%0e"), [num0!(Day)]); + assert_eq!(parse_and_collect("%_e"), [nums!(Day)]); +} + +#[cfg(test)] +#[test] +fn test_strftime_docs() { + use {FixedOffset, TimeZone}; + + let dt = FixedOffset::east(34200).ymd(2001, 7, 8).and_hms_nano(0, 34, 59, 1_026_490_000); + + // date specifiers + assert_eq!(dt.format("%Y").to_string(), "2001"); + assert_eq!(dt.format("%C").to_string(), "20"); + assert_eq!(dt.format("%y").to_string(), "01"); + assert_eq!(dt.format("%m").to_string(), "07"); + assert_eq!(dt.format("%b").to_string(), "Jul"); + assert_eq!(dt.format("%B").to_string(), "July"); + assert_eq!(dt.format("%h").to_string(), "Jul"); + assert_eq!(dt.format("%d").to_string(), "08"); + assert_eq!(dt.format("%e").to_string(), " 8"); + assert_eq!(dt.format("%e").to_string(), dt.format("%_d").to_string()); + assert_eq!(dt.format("%a").to_string(), "Sun"); + assert_eq!(dt.format("%A").to_string(), "Sunday"); + assert_eq!(dt.format("%w").to_string(), "0"); + assert_eq!(dt.format("%u").to_string(), "7"); + assert_eq!(dt.format("%U").to_string(), "28"); + assert_eq!(dt.format("%W").to_string(), "27"); + assert_eq!(dt.format("%G").to_string(), "2001"); + assert_eq!(dt.format("%g").to_string(), "01"); + assert_eq!(dt.format("%V").to_string(), "27"); + assert_eq!(dt.format("%j").to_string(), "189"); + assert_eq!(dt.format("%D").to_string(), "07/08/01"); + assert_eq!(dt.format("%x").to_string(), "07/08/01"); + assert_eq!(dt.format("%F").to_string(), "2001-07-08"); + assert_eq!(dt.format("%v").to_string(), " 8-Jul-2001"); + + // time specifiers + assert_eq!(dt.format("%H").to_string(), "00"); + assert_eq!(dt.format("%k").to_string(), " 0"); + assert_eq!(dt.format("%k").to_string(), dt.format("%_H").to_string()); + assert_eq!(dt.format("%I").to_string(), "12"); + assert_eq!(dt.format("%l").to_string(), "12"); + assert_eq!(dt.format("%l").to_string(), dt.format("%_I").to_string()); + 
assert_eq!(dt.format("%P").to_string(), "am"); + assert_eq!(dt.format("%p").to_string(), "AM"); + assert_eq!(dt.format("%M").to_string(), "34"); + assert_eq!(dt.format("%S").to_string(), "60"); + assert_eq!(dt.format("%f").to_string(), "026490000"); + assert_eq!(dt.format("%.f").to_string(), ".026490"); + assert_eq!(dt.format("%.3f").to_string(), ".026"); + assert_eq!(dt.format("%.6f").to_string(), ".026490"); + assert_eq!(dt.format("%.9f").to_string(), ".026490000"); + assert_eq!(dt.format("%R").to_string(), "00:34"); + assert_eq!(dt.format("%T").to_string(), "00:34:60"); + assert_eq!(dt.format("%X").to_string(), "00:34:60"); + assert_eq!(dt.format("%r").to_string(), "12:34:60 AM"); + + // time zone specifiers + //assert_eq!(dt.format("%Z").to_string(), "ACST"); + assert_eq!(dt.format("%z").to_string(), "+0930"); + assert_eq!(dt.format("%:z").to_string(), "+09:30"); + + // date & time specifiers + assert_eq!(dt.format("%c").to_string(), "Sun Jul 8 00:34:60 2001"); + assert_eq!(dt.format("%+").to_string(), "2001-07-08T00:34:60.026490+09:30"); + assert_eq!(dt.format("%s").to_string(), "994518299"); + + // special specifiers + assert_eq!(dt.format("%t").to_string(), "\t"); + assert_eq!(dt.format("%n").to_string(), "\n"); + assert_eq!(dt.format("%%").to_string(), "%"); +} diff -Nru cargo-0.17.0/vendor/chrono-0.2.25/src/lib.rs cargo-0.19.0/vendor/chrono-0.2.25/src/lib.rs --- cargo-0.17.0/vendor/chrono-0.2.25/src/lib.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/chrono-0.2.25/src/lib.rs 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,732 @@ +// This is a part of rust-chrono. +// Copyright (c) 2014-2015, Kang Seonghoon. +// See README.md and LICENSE.txt for details. + +//! # Chrono 0.2.25 +//! +//! Date and time handling for Rust. (also known as `rust-chrono`) +//! It aims to be a feature-complete superset of +//! the [time](https://github.com/rust-lang-deprecated/time) library. +//! In particular, +//! +//! * Chrono strictly adheres to ISO 8601. +//! 
* Chrono is timezone-aware by default, with separate timezone-naive types. +//! * Chrono is space-optimal and (while not being the primary goal) reasonably efficient. +//! +//! There were several previous attempts to bring a good date and time library to Rust, +//! which Chrono builts upon and should acknowledge: +//! +//! * [Initial research on +//! the wiki](https://github.com/rust-lang/rust-wiki-backup/blob/master/Lib-datetime.md) +//! * Dietrich Epp's [datetime-rs](https://github.com/depp/datetime-rs) +//! * Luis de Bethencourt's [rust-datetime](https://github.com/luisbg/rust-datetime) +//! +//! ## Usage +//! +//! Put this in your `Cargo.toml`: +//! +//! ```toml +//! [dependencies] +//! chrono = "0.2" +//! ``` +//! +//! Or, if you want [Serde](https://github.com/serde-rs/serde) or +//! [rustc-serialize](https://github.com/rust-lang-nursery/rustc-serialize) support, +//! include the features like this: +//! +//! ```toml +//! [dependencies] +//! chrono = { version = "0.2", features = ["serde", "rustc-serialize"] } +//! ``` +//! +//! Then put this in your crate root: +//! +//! ```rust +//! extern crate chrono; +//! ``` +//! +//! ## Overview +//! +//! ### Duration +//! +//! [**`Duration`**](./struct.Duration.html) +//! represents the magnitude of a time span. `Duration` used to be provided by Chrono. +//! It has been moved to the `time` crate as the +//! [`time::Duration`](https://doc.rust-lang.org/time/time/struct.Duration.html) type, but is +//! still re-exported from Chrono. +//! +//! ### Date and Time +//! +//! Chrono provides a +//! [**`DateTime`**](./datetime/struct.DateTime.html) +//! type to represent a date and a time in a timezone. +//! +//! For more abstract moment-in-time tracking such as internal timekeeping +//! that is unconcerned with timezones, consider +//! [`time::SystemTime`](https://doc.rust-lang.org/std/time/struct.SystemTime.html), +//! which tracks your system clock, or +//! 
[`time::Instant`](https://doc.rust-lang.org/std/time/struct.Instant.html), which +//! is an opaque but monotonically-increasing representation of a moment in time. +//! +//! `DateTime` is timezone-aware and must be constructed from +//! the [**`TimeZone`**](./offset/trait.TimeZone.html) object, +//! which defines how the local date is converted to and back from the UTC date. +//! There are three well-known `TimeZone` implementations: +//! +//! * [**`UTC`**](./offset/utc/struct.UTC.html) specifies the UTC time zone. It is most efficient. +//! +//! * [**`Local`**](./offset/local/struct.Local.html) specifies the system local time zone. +//! +//! * [**`FixedOffset`**](./offset/fixed/struct.FixedOffset.html) specifies +//! an arbitrary, fixed time zone such as UTC+09:00 or UTC-10:30. +//! This often results from the parsed textual date and time. +//! Since it stores the most information and does not depend on the system environment, +//! you would want to normalize other `TimeZone`s into this type. +//! +//! `DateTime`s with different `TimeZone` types are distinct and do not mix, +//! but can be converted to each other using +//! the [`DateTime::with_timezone`](./datetime/struct.DateTime.html#method.with_timezone) method. +//! +//! You can get the current date and time in the UTC time zone +//! ([`UTC::now()`](./offset/utc/struct.UTC.html#method.now)) +//! or in the local time zone +//! ([`Local::now()`](./offset/local/struct.Local.html#method.now)). +//! +//! ~~~~ {.rust} +//! use chrono::*; +//! +//! let utc: DateTime = UTC::now(); // e.g. `2014-11-28T12:45:59.324310806Z` +//! let local: DateTime = Local::now(); // e.g. `2014-11-28T21:45:59.324310806+09:00` +//! # let _ = utc; let _ = local; +//! ~~~~ +//! +//! Alternatively, you can create your own date and time. +//! This is a bit verbose due to Rust's lack of function and method overloading, +//! but in turn we get a rich combination of initialization methods. +//! +//! ~~~~ {.rust} +//! use chrono::*; +//! +//! 
let dt = UTC.ymd(2014, 7, 8).and_hms(9, 10, 11); // `2014-07-08T09:10:11Z` +//! // July 8 is 188th day of the year 2014 (`o` for "ordinal") +//! assert_eq!(dt, UTC.yo(2014, 189).and_hms(9, 10, 11)); +//! // July 8 is Tuesday in ISO week 28 of the year 2014. +//! assert_eq!(dt, UTC.isoywd(2014, 28, Weekday::Tue).and_hms(9, 10, 11)); +//! +//! let dt = UTC.ymd(2014, 7, 8).and_hms_milli(9, 10, 11, 12); // `2014-07-08T09:10:11.012Z` +//! assert_eq!(dt, UTC.ymd(2014, 7, 8).and_hms_micro(9, 10, 11, 12_000)); +//! assert_eq!(dt, UTC.ymd(2014, 7, 8).and_hms_nano(9, 10, 11, 12_000_000)); +//! +//! // dynamic verification +//! assert_eq!(UTC.ymd_opt(2014, 7, 8).and_hms_opt(21, 15, 33), +//! LocalResult::Single(UTC.ymd(2014, 7, 8).and_hms(21, 15, 33))); +//! assert_eq!(UTC.ymd_opt(2014, 7, 8).and_hms_opt(80, 15, 33), LocalResult::None); +//! assert_eq!(UTC.ymd_opt(2014, 7, 38).and_hms_opt(21, 15, 33), LocalResult::None); +//! +//! // other time zone objects can be used to construct a local datetime. +//! // obviously, `local_dt` is normally different from `dt`, but `fixed_dt` should be identical. +//! let local_dt = Local.ymd(2014, 7, 8).and_hms_milli(9, 10, 11, 12); +//! let fixed_dt = FixedOffset::east(9 * 3600).ymd(2014, 7, 8).and_hms_milli(18, 10, 11, 12); +//! assert_eq!(dt, fixed_dt); +//! # let _ = local_dt; +//! ~~~~ +//! +//! Various properties are available to the date and time, and can be altered individually. +//! Most of them are defined in the traits [`Datelike`](./trait.Datelike.html) and +//! [`Timelike`](./trait.Timelike.html) which you should `use` before. +//! Addition and subtraction is also supported. +//! The following illustrates most supported operations to the date and time: +//! +//! ~~~~ {.rust} +//! use chrono::*; +//! +//! # /* we intentionally fake the datetime... +//! // assume this returned `2014-11-28T21:45:59.324310806+09:00`: +//! let dt = Local::now(); +//! # */ // up to here. we now define a fixed datetime for the illustrative purpose. 
+//! # let dt = FixedOffset::east(9*3600).ymd(2014, 11, 28).and_hms_nano(21, 45, 59, 324310806); +//! +//! // property accessors +//! assert_eq!((dt.year(), dt.month(), dt.day()), (2014, 11, 28)); +//! assert_eq!((dt.month0(), dt.day0()), (10, 27)); // for unfortunate souls +//! assert_eq!((dt.hour(), dt.minute(), dt.second()), (21, 45, 59)); +//! assert_eq!(dt.weekday(), Weekday::Fri); +//! assert_eq!(dt.weekday().number_from_monday(), 5); // Mon=1, ..., Sat=7 +//! assert_eq!(dt.ordinal(), 332); // the day of year +//! assert_eq!(dt.num_days_from_ce(), 735565); // the number of days from and including Jan 1, 1 +//! +//! // time zone accessor and manipulation +//! assert_eq!(dt.offset().local_minus_utc(), Duration::hours(9)); +//! assert_eq!(dt.timezone(), FixedOffset::east(9 * 3600)); +//! assert_eq!(dt.with_timezone(&UTC), UTC.ymd(2014, 11, 28).and_hms_nano(12, 45, 59, 324310806)); +//! +//! // a sample of property manipulations (validates dynamically) +//! assert_eq!(dt.with_day(29).unwrap().weekday(), Weekday::Sat); // 2014-11-29 is Saturday +//! assert_eq!(dt.with_day(32), None); +//! assert_eq!(dt.with_year(-300).unwrap().num_days_from_ce(), -109606); // November 29, 301 BCE +//! +//! // arithmetic operations +//! assert_eq!(UTC.ymd(2014, 11, 14).and_hms(8, 9, 10) - UTC.ymd(2014, 11, 14).and_hms(10, 9, 8), +//! Duration::seconds(-2 * 3600 + 2)); +//! assert_eq!(UTC.ymd(1970, 1, 1).and_hms(0, 0, 0) + Duration::seconds(1_000_000_000), +//! UTC.ymd(2001, 9, 9).and_hms(1, 46, 40)); +//! assert_eq!(UTC.ymd(1970, 1, 1).and_hms(0, 0, 0) - Duration::seconds(1_000_000_000), +//! UTC.ymd(1938, 4, 24).and_hms(22, 13, 20)); +//! ~~~~ +//! +//! Formatting is done via the [`format`](./datetime/struct.DateTime.html#method.format) method, +//! which format is equivalent to the familiar `strftime` format. +//! (See the [`format::strftime` module documentation](./format/strftime/index.html#specifiers) +//! for full syntax.) +//! +//! 
The default `to_string` method and `{:?}` specifier also give a reasonable representation. +//! Chrono also provides [`to_rfc2822`](./datetime/struct.DateTime.html#method.to_rfc2822) and +//! [`to_rfc3339`](./datetime/struct.DateTime.html#method.to_rfc3339) methods +//! for well-known formats. +//! +//! ~~~~ {.rust} +//! use chrono::*; +//! +//! let dt = UTC.ymd(2014, 11, 28).and_hms(12, 0, 9); +//! assert_eq!(dt.format("%Y-%m-%d %H:%M:%S").to_string(), "2014-11-28 12:00:09"); +//! assert_eq!(dt.format("%a %b %e %T %Y").to_string(), "Fri Nov 28 12:00:09 2014"); +//! assert_eq!(dt.format("%a %b %e %T %Y").to_string(), dt.format("%c").to_string()); +//! +//! assert_eq!(dt.to_string(), "2014-11-28 12:00:09 UTC"); +//! assert_eq!(dt.to_rfc2822(), "Fri, 28 Nov 2014 12:00:09 +0000"); +//! assert_eq!(dt.to_rfc3339(), "2014-11-28T12:00:09+00:00"); +//! assert_eq!(format!("{:?}", dt), "2014-11-28T12:00:09Z"); +//! ~~~~ +//! +//! Parsing can be done with three methods: +//! +//! 1. The standard [`FromStr`](https://doc.rust-lang.org/std/str/trait.FromStr.html) trait +//! (and [`parse`](https://doc.rust-lang.org/std/primitive.str.html#method.parse) method +//! on a string) can be used for parsing `DateTime`, `DateTime` and +//! `DateTime` values. This parses what the `{:?}` +//! ([`std::fmt::Debug`](https://doc.rust-lang.org/std/fmt/trait.Debug.html)) +//! format specifier prints, and requires the offset to be present. +//! +//! 2. [`DateTime::parse_from_str`](./datetime/struct.DateTime.html#method.parse_from_str) parses +//! a date and time with offsets and returns `DateTime`. +//! This should be used when the offset is a part of input and the caller cannot guess that. +//! It *cannot* be used when the offset can be missing. +//! [`DateTime::parse_from_rfc2822`](./datetime/struct.DateTime.html#method.parse_from_rfc2822) +//! and +//! [`DateTime::parse_from_rfc3339`](./datetime/struct.DateTime.html#method.parse_from_rfc3339) +//! are similar but for well-known formats. +//! 
+//! 3. [`Offset::datetime_from_str`](./offset/trait.TimeZone.html#method.datetime_from_str) is +//! similar but returns `DateTime` of given offset. +//! When the explicit offset is missing from the input, it simply uses given offset. +//! It issues an error when the input contains an explicit offset different +//! from the current offset. +//! +//! More detailed control over the parsing process is available via +//! [`format`](./format/index.html) module. +//! +//! ~~~~ {.rust} +//! use chrono::*; +//! +//! let dt = UTC.ymd(2014, 11, 28).and_hms(12, 0, 9); +//! let fixed_dt = dt.with_timezone(&FixedOffset::east(9*3600)); +//! +//! // method 1 +//! assert_eq!("2014-11-28T12:00:09Z".parse::>(), Ok(dt.clone())); +//! assert_eq!("2014-11-28T21:00:09+09:00".parse::>(), Ok(dt.clone())); +//! assert_eq!("2014-11-28T21:00:09+09:00".parse::>(), Ok(fixed_dt.clone())); +//! +//! // method 2 +//! assert_eq!(DateTime::parse_from_str("2014-11-28 21:00:09 +09:00", "%Y-%m-%d %H:%M:%S %z"), +//! Ok(fixed_dt.clone())); +//! assert_eq!(DateTime::parse_from_rfc2822("Fri, 28 Nov 2014 21:00:09 +0900"), +//! Ok(fixed_dt.clone())); +//! assert_eq!(DateTime::parse_from_rfc3339("2014-11-28T21:00:09+09:00"), Ok(fixed_dt.clone())); +//! +//! // method 3 +//! assert_eq!(UTC.datetime_from_str("2014-11-28 12:00:09", "%Y-%m-%d %H:%M:%S"), Ok(dt.clone())); +//! assert_eq!(UTC.datetime_from_str("Fri Nov 28 12:00:09 2014", "%a %b %e %T %Y"), Ok(dt.clone())); +//! +//! // oops, the year is missing! +//! assert!(UTC.datetime_from_str("Fri Nov 28 12:00:09", "%a %b %e %T %Y").is_err()); +//! // oops, the format string does not include the year at all! +//! assert!(UTC.datetime_from_str("Fri Nov 28 12:00:09", "%a %b %e %T").is_err()); +//! // oops, the weekday is incorrect! +//! assert!(UTC.datetime_from_str("Sat Nov 28 12:00:09 2014", "%a %b %e %T %Y").is_err()); +//! ~~~~ +//! +//! ### Individual date +//! +//! Chrono also provides an individual date type ([**`Date`**](./date/struct.Date.html)). +//! 
It also has time zones attached, and have to be constructed via time zones. +//! Most operations available to `DateTime` are also available to `Date` whenever appropriate. +//! +//! ~~~~ {.rust} +//! use chrono::*; +//! +//! # // these *may* fail, but only very rarely. just rerun the test if you were that unfortunate ;) +//! assert_eq!(UTC::today(), UTC::now().date()); +//! assert_eq!(Local::today(), Local::now().date()); +//! +//! assert_eq!(UTC.ymd(2014, 11, 28).weekday(), Weekday::Fri); +//! assert_eq!(UTC.ymd_opt(2014, 11, 31), LocalResult::None); +//! assert_eq!(UTC.ymd(2014, 11, 28).and_hms_milli(7, 8, 9, 10).format("%H%M%S").to_string(), +//! "070809"); +//! ~~~~ +//! +//! There is no timezone-aware `Time` due to the lack of usefulness and also the complexity. +//! +//! `DateTime` has [`date`](./datetime/struct.DateTime.html#method.date) method +//! which returns a `Date` which represents its date component. +//! There is also a [`time`](./datetime/struct.DateTime.html#method.time) method, +//! which simply returns a naive local time described below. +//! +//! ### Naive date and time +//! +//! Chrono provides naive counterparts to `Date`, (non-existent) `Time` and `DateTime` +//! as [**`NaiveDate`**](./naive/date/struct.NaiveDate.html), +//! [**`NaiveTime`**](./naive/time/struct.NaiveTime.html) and +//! [**`NaiveDateTime`**](./naive/datetime/struct.NaiveDateTime.html) respectively. +//! +//! They have almost equivalent interfaces as their timezone-aware twins, +//! but are not associated to time zones obviously and can be quite low-level. +//! They are mostly useful for building blocks for higher-level types. +//! +//! Timezone-aware `DateTime` and `Date` types have two methods returning naive versions: +//! [`naive_local`](./datetime/struct.DateTime.html#method.naive_local) returns +//! a view to the naive local time, +//! and [`naive_utc`](./datetime/struct.DateTime.html#method.naive_utc) returns +//! a view to the naive UTC time. +//! +//! 
## Limitations +//! +//! Only proleptic Gregorian calendar (i.e. extended to support older dates) is supported. +//! Be very careful if you really have to deal with pre-20C dates, they can be in Julian or others. +//! +//! Date types are limited in about +/- 262,000 years from the common epoch. +//! Time types are limited in the nanosecond accuracy. +//! +//! [Leap seconds are supported in the representation but +//! Chrono doesn't try to make use of them](./naive/time/index.html#leap-second-handling). +//! (The main reason is that leap seconds are not really predictable.) +//! Almost *every* operation over the possible leap seconds will ignore them. +//! Consider using `NaiveDateTime` with the implicit TAI (International Atomic Time) scale +//! if you want. +//! +//! Chrono inherently does not support an inaccurate or partial date and time representation. +//! Any operation that can be ambiguous will return `None` in such cases. +//! For example, "a month later" of 2014-01-30 is not well-defined +//! and consequently `UTC.ymd(2014, 1, 30).with_month(2)` returns `None`. +//! +//! Advanced time zone handling is not yet supported (but is planned in 0.3). + +#![doc(html_root_url = "https://lifthrasiir.github.io/rust-chrono/")] + +#![cfg_attr(bench, feature(test))] // lib stability features as per RFC #507 +#![deny(missing_docs)] + +extern crate time as stdtime; +extern crate num; +#[cfg(feature = "rustc-serialize")] +extern crate rustc_serialize; +#[cfg(feature = "serde")] +extern crate serde; + +pub use duration::Duration; +pub use offset::{TimeZone, Offset, LocalResult}; +pub use offset::utc::UTC; +pub use offset::fixed::FixedOffset; +pub use offset::local::Local; +pub use naive::date::NaiveDate; +pub use naive::time::NaiveTime; +pub use naive::datetime::NaiveDateTime; +pub use date::Date; +pub use datetime::DateTime; +pub use format::{ParseError, ParseResult}; + +// useful throughout the codebase +macro_rules! 
try_opt { + ($e:expr) => (match $e { Some(v) => v, None => return None }) +} + +mod div; +pub mod duration { + //! ISO 8601 duration. + //! + //! This used to be a part of rust-chrono, + //! but has been subsequently merged into Rust's standard library. + pub use stdtime::Duration; +} +pub mod offset; +pub mod naive { + //! Date and time types which do not concern about the timezones. + //! + //! They are primarily building blocks for other types + //! (e.g. [`TimeZone`](../offset/trait.TimeZone.html)), + //! but can be also used for the simpler date and time handling. + pub mod date; + pub mod time; + pub mod datetime; +} +pub mod date; +pub mod datetime; +pub mod format; + +/// The day of week. +/// +/// The order of the days of week depends on the context. +/// (This is why this type does *not* implement `PartialOrd` or `Ord` traits.) +/// One should prefer `*_from_monday` or `*_from_sunday` methods to get the correct result. +#[derive(PartialEq, Eq, Copy, Clone, Debug)] +#[cfg_attr(feature = "rustc-serialize", derive(RustcEncodable, RustcDecodable))] +pub enum Weekday { + /// Monday. + Mon = 0, + /// Tuesday. + Tue = 1, + /// Wednesday. + Wed = 2, + /// Thursday. + Thu = 3, + /// Friday. + Fri = 4, + /// Saturday. + Sat = 5, + /// Sunday. + Sun = 6, +} + +impl Weekday { + /// The next day in the week. + /// + /// `w`: | `Mon` | `Tue` | `Wed` | `Thu` | `Fri` | `Sat` | `Sun` + /// ----------- | ----- | ----- | ----- | ----- | ----- | ----- | ----- + /// `w.succ()`: | `Tue` | `Wed` | `Thu` | `Fri` | `Sat` | `Sun` | `Mon` + #[inline] + pub fn succ(&self) -> Weekday { + match *self { + Weekday::Mon => Weekday::Tue, + Weekday::Tue => Weekday::Wed, + Weekday::Wed => Weekday::Thu, + Weekday::Thu => Weekday::Fri, + Weekday::Fri => Weekday::Sat, + Weekday::Sat => Weekday::Sun, + Weekday::Sun => Weekday::Mon, + } + } + + /// The previous day in the week. 
+ /// + /// `w`: | `Mon` | `Tue` | `Wed` | `Thu` | `Fri` | `Sat` | `Sun` + /// ----------- | ----- | ----- | ----- | ----- | ----- | ----- | ----- + /// `w.pred()`: | `Sun` | `Mon` | `Tue` | `Wed` | `Thu` | `Fri` | `Sat` + #[inline] + pub fn pred(&self) -> Weekday { + match *self { + Weekday::Mon => Weekday::Sun, + Weekday::Tue => Weekday::Mon, + Weekday::Wed => Weekday::Tue, + Weekday::Thu => Weekday::Wed, + Weekday::Fri => Weekday::Thu, + Weekday::Sat => Weekday::Fri, + Weekday::Sun => Weekday::Sat, + } + } + + /// Returns a day-of-week number starting from Monday = 1. (ISO 8601 weekday number) + /// + /// `w`: | `Mon` | `Tue` | `Wed` | `Thu` | `Fri` | `Sat` | `Sun` + /// ------------------------- | ----- | ----- | ----- | ----- | ----- | ----- | ----- + /// `w.number_from_monday()`: | 1 | 2 | 3 | 4 | 5 | 6 | 7 + #[inline] + pub fn number_from_monday(&self) -> u32 { + match *self { + Weekday::Mon => 1, + Weekday::Tue => 2, + Weekday::Wed => 3, + Weekday::Thu => 4, + Weekday::Fri => 5, + Weekday::Sat => 6, + Weekday::Sun => 7, + } + } + + /// Returns a day-of-week number starting from Sunday = 1. + /// + /// `w`: | `Mon` | `Tue` | `Wed` | `Thu` | `Fri` | `Sat` | `Sun` + /// ------------------------- | ----- | ----- | ----- | ----- | ----- | ----- | ----- + /// `w.number_from_sunday()`: | 2 | 3 | 4 | 5 | 6 | 7 | 1 + #[inline] + pub fn number_from_sunday(&self) -> u32 { + match *self { + Weekday::Mon => 2, + Weekday::Tue => 3, + Weekday::Wed => 4, + Weekday::Thu => 5, + Weekday::Fri => 6, + Weekday::Sat => 7, + Weekday::Sun => 1, + } + } + + /// Returns a day-of-week number starting from Monday = 0. 
+ /// + /// `w`: | `Mon` | `Tue` | `Wed` | `Thu` | `Fri` | `Sat` | `Sun` + /// --------------------------- | ----- | ----- | ----- | ----- | ----- | ----- | ----- + /// `w.num_days_from_monday()`: | 0 | 1 | 2 | 3 | 4 | 5 | 6 + #[inline] + pub fn num_days_from_monday(&self) -> u32 { + match *self { + Weekday::Mon => 0, + Weekday::Tue => 1, + Weekday::Wed => 2, + Weekday::Thu => 3, + Weekday::Fri => 4, + Weekday::Sat => 5, + Weekday::Sun => 6, + } + } + + /// Returns a day-of-week number starting from Sunday = 0. + /// + /// `w`: | `Mon` | `Tue` | `Wed` | `Thu` | `Fri` | `Sat` | `Sun` + /// --------------------------- | ----- | ----- | ----- | ----- | ----- | ----- | ----- + /// `w.num_days_from_sunday()`: | 1 | 2 | 3 | 4 | 5 | 6 | 0 + #[inline] + pub fn num_days_from_sunday(&self) -> u32 { + match *self { + Weekday::Mon => 1, + Weekday::Tue => 2, + Weekday::Wed => 3, + Weekday::Thu => 4, + Weekday::Fri => 5, + Weekday::Sat => 6, + Weekday::Sun => 0, + } + } +} + +/// Any weekday can be represented as an integer from 0 to 6, which equals to +/// [`Weekday::num_days_from_monday`](#method.num_days_from_monday) in this implementation. +/// Do not heavily depend on this though; use explicit methods whenever possible. +impl num::traits::FromPrimitive for Weekday { + #[inline] + fn from_i64(n: i64) -> Option { + match n { + 0 => Some(Weekday::Mon), + 1 => Some(Weekday::Tue), + 2 => Some(Weekday::Wed), + 3 => Some(Weekday::Thu), + 4 => Some(Weekday::Fri), + 5 => Some(Weekday::Sat), + 6 => Some(Weekday::Sun), + _ => None, + } + } + + #[inline] + fn from_u64(n: u64) -> Option { + match n { + 0 => Some(Weekday::Mon), + 1 => Some(Weekday::Tue), + 2 => Some(Weekday::Wed), + 3 => Some(Weekday::Thu), + 4 => Some(Weekday::Fri), + 5 => Some(Weekday::Sat), + 6 => Some(Weekday::Sun), + _ => None, + } + } +} + + +/// The common set of methods for date component. +pub trait Datelike: Sized { + /// Returns the year number in the [calendar date](./naive/date/index.html#calendar-date). 
+ fn year(&self) -> i32; + + /// Returns the absolute year number starting from 1 with a boolean flag, + /// which is false when the year predates the epoch (BCE/BC) and true otherwise (CE/AD). + #[inline] + fn year_ce(&self) -> (bool, u32) { + let year = self.year(); + if year < 1 { + (false, (1 - year) as u32) + } else { + (true, year as u32) + } + } + + /// Returns the month number starting from 1. + /// + /// The return value ranges from 1 to 12. + fn month(&self) -> u32; + + /// Returns the month number starting from 0. + /// + /// The return value ranges from 0 to 11. + fn month0(&self) -> u32; + + /// Returns the day of month starting from 1. + /// + /// The return value ranges from 1 to 31. (The last day of month differs by months.) + fn day(&self) -> u32; + + /// Returns the day of month starting from 0. + /// + /// The return value ranges from 0 to 30. (The last day of month differs by months.) + fn day0(&self) -> u32; + + /// Returns the day of year starting from 1. + /// + /// The return value ranges from 1 to 366. (The last day of year differs by years.) + fn ordinal(&self) -> u32; + + /// Returns the day of year starting from 0. + /// + /// The return value ranges from 0 to 365. (The last day of year differs by years.) + fn ordinal0(&self) -> u32; + + /// Returns the day of week. + fn weekday(&self) -> Weekday; + + /// Returns the ISO week date: an adjusted year, week number and day of week. + /// The adjusted year may differ from that of the calendar date. + fn isoweekdate(&self) -> (i32, u32, Weekday); + + /// Makes a new value with the year number changed. + /// + /// Returns `None` when the resulting value would be invalid. + fn with_year(&self, year: i32) -> Option; + + /// Makes a new value with the month number (starting from 1) changed. + /// + /// Returns `None` when the resulting value would be invalid. + fn with_month(&self, month: u32) -> Option; + + /// Makes a new value with the month number (starting from 0) changed. 
+ /// + /// Returns `None` when the resulting value would be invalid. + fn with_month0(&self, month0: u32) -> Option; + + /// Makes a new value with the day of month (starting from 1) changed. + /// + /// Returns `None` when the resulting value would be invalid. + fn with_day(&self, day: u32) -> Option; + + /// Makes a new value with the day of month (starting from 0) changed. + /// + /// Returns `None` when the resulting value would be invalid. + fn with_day0(&self, day0: u32) -> Option; + + /// Makes a new value with the day of year (starting from 1) changed. + /// + /// Returns `None` when the resulting value would be invalid. + fn with_ordinal(&self, ordinal: u32) -> Option; + + /// Makes a new value with the day of year (starting from 0) changed. + /// + /// Returns `None` when the resulting value would be invalid. + fn with_ordinal0(&self, ordinal0: u32) -> Option; + + /// Returns the number of days since January 1, 1 (Day 1) in the proleptic Gregorian calendar. + fn num_days_from_ce(&self) -> i32 { + // we know this wouldn't overflow since year is limited to 1/2^13 of i32's full range. + let mut year = self.year() - 1; + let mut ndays = 0; + if year < 0 { + let excess = 1 + (-year) / 400; + year += excess * 400; + ndays -= excess * 146097; + } + let div_100 = year / 100; + ndays += ((year * 1461) >> 2) - div_100 + (div_100 >> 2); + ndays + self.ordinal() as i32 + } +} + +/// The common set of methods for time component. +pub trait Timelike: Sized { + /// Returns the hour number from 0 to 23. + fn hour(&self) -> u32; + + /// Returns the hour number from 1 to 12 with a boolean flag, + /// which is false for AM and true for PM. + #[inline] + fn hour12(&self) -> (bool, u32) { + let hour = self.hour(); + let mut hour12 = hour % 12; + if hour12 == 0 { + hour12 = 12; + } + (hour >= 12, hour12) + } + + /// Returns the minute number from 0 to 59. + fn minute(&self) -> u32; + + /// Returns the second number from 0 to 59. 
+ fn second(&self) -> u32; + + /// Returns the number of nanoseconds since the whole non-leap second. + /// The range from 1,000,000,000 to 1,999,999,999 represents + /// the [leap second](./naive/time/index.html#leap-second-handling). + fn nanosecond(&self) -> u32; + + /// Makes a new value with the hour number changed. + /// + /// Returns `None` when the resulting value would be invalid. + fn with_hour(&self, hour: u32) -> Option; + + /// Makes a new value with the minute number changed. + /// + /// Returns `None` when the resulting value would be invalid. + fn with_minute(&self, min: u32) -> Option; + + /// Makes a new value with the second number changed. + /// + /// Returns `None` when the resulting value would be invalid. + /// As with the [`second`](#tymethod.second) method, + /// the input range is restricted to 0 through 59. + fn with_second(&self, sec: u32) -> Option; + + /// Makes a new value with nanoseconds since the whole non-leap second changed. + /// + /// Returns `None` when the resulting value would be invalid. + /// As with the [`nanosecond`](#tymethod.nanosecond) method, + /// the input range can exceed 1,000,000,000 for leap seconds. + fn with_nanosecond(&self, nano: u32) -> Option; + + /// Returns the number of non-leap seconds past the last midnight. 
+ #[inline] + fn num_seconds_from_midnight(&self) -> u32 { + self.hour() * 3600 + self.minute() * 60 + self.second() + } +} + +#[test] +fn test_readme_doomsday() { + use num::iter::range_inclusive; + + for y in range_inclusive(naive::date::MIN.year(), naive::date::MAX.year()) { + // even months + let d4 = NaiveDate::from_ymd(y, 4, 4); + let d6 = NaiveDate::from_ymd(y, 6, 6); + let d8 = NaiveDate::from_ymd(y, 8, 8); + let d10 = NaiveDate::from_ymd(y, 10, 10); + let d12 = NaiveDate::from_ymd(y, 12, 12); + + // nine to five, seven-eleven + let d59 = NaiveDate::from_ymd(y, 5, 9); + let d95 = NaiveDate::from_ymd(y, 9, 5); + let d711 = NaiveDate::from_ymd(y, 7, 11); + let d117 = NaiveDate::from_ymd(y, 11, 7); + + // "March 0" + let d30 = NaiveDate::from_ymd(y, 3, 1).pred(); + + let weekday = d30.weekday(); + let other_dates = [d4, d6, d8, d10, d12, d59, d95, d711, d117]; + assert!(other_dates.iter().all(|d| d.weekday() == weekday)); + } +} diff -Nru cargo-0.17.0/vendor/chrono-0.2.25/src/naive/date.rs cargo-0.19.0/vendor/chrono-0.2.25/src/naive/date.rs --- cargo-0.17.0/vendor/chrono-0.2.25/src/naive/date.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/chrono-0.2.25/src/naive/date.rs 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,2806 @@ +// This is a part of rust-chrono. +// Copyright (c) 2014-2015, Kang Seonghoon. +// See README.md and LICENSE.txt for details. + +//! ISO 8601 calendar date without timezone. +//! +//! # Calendar Date +//! +//! The ISO 8601 **calendar date** follows the proleptic Gregorian calendar. +//! It is like a normal civil calendar but note some slight differences: +//! +//! * Dates before the Gregorian calendar's inception in 1582 are defined via the extrapolation. +//! Be careful, as historical dates are often noted in the Julian calendar and others +//! and the transition to Gregorian may differ across countries (as late as early 20C). +//! +//! 
(Some example: Both Shakespeare from Britain and Cervantes from Spain seemingly died +//! on the same calendar date---April 23, 1616---but in the different calendar. +//! Britain used the Julian calendar at that time, so Shakespeare's death is later.) +//! +//! * ISO 8601 calendars has the year 0, which is 1 BCE (a year before 1 CE). +//! If you need a typical BCE/BC and CE/AD notation for year numbers, +//! use the [`Datelike::year_ce`](../../trait.Datelike.html#method.year_ce) method. +//! +//! # Week Date +//! +//! The ISO 8601 **week date** is a triple of year number, week number +//! and [day of the week](../../enum.Weekday.html) with the following rules: +//! +//! * A week consists of Monday through Sunday, and is always numbered within some year. +//! The week number ranges from 1 to 52 or 53 depending on the year. +//! +//! * The week 1 of given year is defined as the first week containing January 4 of that year, +//! or equivalently, the first week containing four or more days in that year. +//! +//! * The year number in the week date may *not* correspond to the actual Gregorian year. +//! For example, January 3, 2016 (Sunday) was on the last (53rd) week of 2015. +//! +//! Chrono's date types default to the ISO 8601 [calendar date](#calendar-date), +//! but the [`Datelike::isoweekdate`](../../trait.Datelike.html#tymethod.isoweekdate) method +//! can be used to get the corresponding week date. +//! +//! # Ordinal Date +//! +//! The ISO 8601 **ordinal date** is a pair of year number and day of the year ("ordinal"). +//! The ordinal number ranges from 1 to 365 or 366 depending on the year. +//! The year number is same to that of the [calendar date](#calendar-date). +//! +//! This is currently the internal format of Chrono's date types. 
+ +use std::{str, fmt, hash}; +use std::ops::{Add, Sub}; +use num::traits::ToPrimitive; + +use {Weekday, Datelike}; +use div::div_mod_floor; +use duration::Duration; +use naive::time::NaiveTime; +use naive::datetime::NaiveDateTime; +use format::{Item, Numeric, Pad}; +use format::{parse, Parsed, ParseError, ParseResult, DelayedFormat, StrftimeItems}; + +use self::internals::{DateImpl, Of, Mdf, YearFlags}; + +const MAX_YEAR: i32 = internals::MAX_YEAR; +const MIN_YEAR: i32 = internals::MIN_YEAR; + +// MAX_YEAR-12-31 minus 0000-01-01 +// = ((MAX_YEAR+1)-01-01 minus 0001-01-01) + (0001-01-01 minus 0000-01-01) - 1 day +// = ((MAX_YEAR+1)-01-01 minus 0001-01-01) + 365 days +// = MAX_YEAR * 365 + (# of leap years from 0001 to MAX_YEAR) + 365 days +#[cfg(test)] // only used for testing +const MAX_DAYS_FROM_YEAR_0: i32 = MAX_YEAR * 365 + + MAX_YEAR / 4 - + MAX_YEAR / 100 + + MAX_YEAR / 400 + 365; + +// MIN_YEAR-01-01 minus 0000-01-01 +// = (MIN_YEAR+400n+1)-01-01 minus (400n+1)-01-01 +// = ((MIN_YEAR+400n+1)-01-01 minus 0001-01-01) - ((400n+1)-01-01 minus 0001-01-01) +// = ((MIN_YEAR+400n+1)-01-01 minus 0001-01-01) - 146097n days +// +// n is set to 1000 for convenience. +#[cfg(test)] // only used for testing +const MIN_DAYS_FROM_YEAR_0: i32 = (MIN_YEAR + 400_000) * 365 + + (MIN_YEAR + 400_000) / 4 - + (MIN_YEAR + 400_000) / 100 + + (MIN_YEAR + 400_000) / 400 - 146097_000; + +/// ISO 8601 calendar date without timezone. +/// Allows for every [proleptic Gregorian date](./index.html#calendar-date) +/// from Jan 1, 262145 BCE to Dec 31, 262143 CE. +/// Also supports the conversion from ISO 8601 ordinal and week date. +#[derive(PartialEq, Eq, PartialOrd, Ord, Copy, Clone)] +pub struct NaiveDate { + ymdf: DateImpl, // (year << 13) | of +} + +/// The minimum possible `NaiveDate` (January 1, 262145 BCE). +pub const MIN: NaiveDate = NaiveDate { ymdf: (MIN_YEAR << 13) | (1 << 4) | 0o07 /*FE*/ }; +/// The maximum possible `NaiveDate` (December 31, 262143 CE). 
+pub const MAX: NaiveDate = NaiveDate { ymdf: (MAX_YEAR << 13) | (365 << 4) | 0o17 /*F*/ }; + +// as it is hard to verify year flags in `MIN` and `MAX`, we use a separate run-time test. +#[test] +fn test_date_bounds() { + let calculated_min = NaiveDate::from_ymd(MIN_YEAR, 1, 1); + let calculated_max = NaiveDate::from_ymd(MAX_YEAR, 12, 31); + assert!(MIN == calculated_min, + "`MIN` should have a year flag {:?}", calculated_min.of().flags()); + assert!(MAX == calculated_max, + "`MAX` should have a year flag {:?}", calculated_max.of().flags()); +} + +impl NaiveDate { + /// Makes a new `NaiveDate` from year and packed ordinal-flags, with a verification. + fn from_of(year: i32, of: Of) -> Option { + if year >= MIN_YEAR && year <= MAX_YEAR && of.valid() { + let Of(of) = of; + Some(NaiveDate { ymdf: (year << 13) | (of as DateImpl) }) + } else { + None + } + } + + /// Makes a new `NaiveDate` from year and packed month-day-flags, with a verification. + fn from_mdf(year: i32, mdf: Mdf) -> Option { + NaiveDate::from_of(year, mdf.to_of()) + } + + /// Makes a new `NaiveDate` from the serialized representation. + /// Used for serialization formats. + #[cfg(feature = "rustc-serialize")] + fn from_serialized(ymdf: i32) -> Option { + // check if the year flag is correct + if (ymdf & 0b1111) as u8 != YearFlags::from_year(ymdf >> 13).0 { return None; } + + // check if the ordinal is in the range + let date = NaiveDate { ymdf: ymdf }; + if !date.of().valid() { return None; } + + Some(date) + } + + /// Returns a serialized representation of this `NaiveDate`. + #[cfg(feature = "rustc-serialize")] + fn to_serialized(&self) -> i32 { + self.ymdf + } + + /// Makes a new `NaiveDate` from the [calendar date](./index.html#calendar-date) + /// (year, month and day). + /// + /// Panics on the out-of-range date, invalid month and/or day. 
+ /// + /// # Example + /// + /// ~~~~ + /// use chrono::{NaiveDate, Datelike, Weekday}; + /// + /// let d = NaiveDate::from_ymd(2015, 3, 14); + /// assert_eq!(d.year(), 2015); + /// assert_eq!(d.month(), 3); + /// assert_eq!(d.day(), 14); + /// assert_eq!(d.ordinal(), 73); // day of year + /// assert_eq!(d.isoweekdate(), (2015, 11, Weekday::Sat)); // ISO week and weekday + /// assert_eq!(d.num_days_from_ce(), 735671); // days since January 1, 1 CE + /// ~~~~ + pub fn from_ymd(year: i32, month: u32, day: u32) -> NaiveDate { + NaiveDate::from_ymd_opt(year, month, day).expect("invalid or out-of-range date") + } + + /// Makes a new `NaiveDate` from the [calendar date](./index.html#calendar-date) + /// (year, month and day). + /// + /// Returns `None` on the out-of-range date, invalid month and/or day. + /// + /// # Example + /// + /// ~~~~ + /// use chrono::NaiveDate; + /// + /// let ymd = |y,m,d| NaiveDate::from_ymd_opt(y, m, d); + /// assert!(ymd(2015, 3, 14).is_some()); + /// assert!(ymd(2015, 0, 14).is_none()); + /// assert!(ymd(2015, 2, 29).is_none()); + /// assert!(ymd(-4, 2, 29).is_some()); // 5 BCE is a leap year + /// assert!(ymd(400000, 1, 1).is_none()); + /// assert!(ymd(-400000, 1, 1).is_none()); + /// ~~~~ + pub fn from_ymd_opt(year: i32, month: u32, day: u32) -> Option { + let flags = YearFlags::from_year(year); + NaiveDate::from_mdf(year, Mdf::new(month, day, flags)) + } + + /// Makes a new `NaiveDate` from the [ordinal date](./index.html#ordinal-date) + /// (year and day of the year). + /// + /// Panics on the out-of-range date and/or invalid day of year. 
+ /// + /// # Example + /// + /// ~~~~ + /// use chrono::{NaiveDate, Datelike, Weekday}; + /// + /// let d = NaiveDate::from_yo(2015, 73); + /// assert_eq!(d.ordinal(), 73); + /// assert_eq!(d.year(), 2015); + /// assert_eq!(d.month(), 3); + /// assert_eq!(d.day(), 14); + /// assert_eq!(d.isoweekdate(), (2015, 11, Weekday::Sat)); // ISO week and weekday + /// assert_eq!(d.num_days_from_ce(), 735671); // days since January 1, 1 CE + /// ~~~~ + pub fn from_yo(year: i32, ordinal: u32) -> NaiveDate { + NaiveDate::from_yo_opt(year, ordinal).expect("invalid or out-of-range date") + } + + /// Makes a new `NaiveDate` from the [ordinal date](./index.html#ordinal-date) + /// (year and day of the year). + /// + /// Returns `None` on the out-of-range date and/or invalid day of year. + /// + /// # Example + /// + /// ~~~~ + /// use chrono::NaiveDate; + /// + /// let yo = |y,o| NaiveDate::from_yo_opt(y, o); + /// assert!(yo(2015, 100).is_some()); + /// assert!(yo(2015, 0).is_none()); + /// assert!(yo(2015, 365).is_some()); + /// assert!(yo(2015, 366).is_none()); + /// assert!(yo(-4, 366).is_some()); // 5 BCE is a leap year + /// assert!(yo(400000, 1).is_none()); + /// assert!(yo(-400000, 1).is_none()); + /// ~~~~ + pub fn from_yo_opt(year: i32, ordinal: u32) -> Option { + let flags = YearFlags::from_year(year); + NaiveDate::from_of(year, Of::new(ordinal, flags)) + } + + /// Makes a new `NaiveDate` from the [ISO week date](./index.html#week-date) + /// (year, week number and day of the week). + /// The resulting `NaiveDate` may have a different year from the input year. + /// + /// Panics on the out-of-range date and/or invalid week number. 
+ /// + /// # Example + /// + /// ~~~~ + /// use chrono::{NaiveDate, Datelike, Weekday}; + /// + /// let d = NaiveDate::from_isoywd(2015, 11, Weekday::Sat); + /// assert_eq!(d.isoweekdate(), (2015, 11, Weekday::Sat)); + /// assert_eq!(d.year(), 2015); + /// assert_eq!(d.month(), 3); + /// assert_eq!(d.day(), 14); + /// assert_eq!(d.ordinal(), 73); // day of year + /// assert_eq!(d.num_days_from_ce(), 735671); // days since January 1, 1 CE + /// ~~~~ + pub fn from_isoywd(year: i32, week: u32, weekday: Weekday) -> NaiveDate { + NaiveDate::from_isoywd_opt(year, week, weekday).expect("invalid or out-of-range date") + } + + /// Makes a new `NaiveDate` from the [ISO week date](./index.html#week-date) + /// (year, week number and day of the week). + /// The resulting `NaiveDate` may have a different year from the input year. + /// + /// Returns `None` on the out-of-range date and/or invalid week number. + /// + /// # Example + /// + /// ~~~~ + /// use chrono::{NaiveDate, Weekday}; + /// + /// let ymd = |y,m,d| NaiveDate::from_ymd(y, m, d); + /// let isoywd = |y,w,d| NaiveDate::from_isoywd_opt(y, w, d); + /// + /// assert_eq!(isoywd(2015, 0, Weekday::Sun), None); + /// assert_eq!(isoywd(2015, 10, Weekday::Sun), Some(ymd(2015, 3, 8))); + /// assert_eq!(isoywd(2015, 30, Weekday::Mon), Some(ymd(2015, 7, 20))); + /// assert_eq!(isoywd(2015, 60, Weekday::Mon), None); + /// + /// assert_eq!(isoywd(400000, 10, Weekday::Fri), None); + /// assert_eq!(isoywd(-400000, 10, Weekday::Sat), None); + /// ~~~~ + /// + /// The year number of ISO week date may differ from that of the calendar date. 
+ /// + /// ~~~~ + /// # use chrono::{NaiveDate, Weekday}; + /// # let ymd = |y,m,d| NaiveDate::from_ymd(y, m, d); + /// # let isoywd = |y,w,d| NaiveDate::from_isoywd_opt(y, w, d); + /// // Mo Tu We Th Fr Sa Su + /// // 2014-W52 22 23 24 25 26 27 28 has 4+ days of new year, + /// // 2015-W01 29 30 31 1 2 3 4 <- so this is the first week + /// assert_eq!(isoywd(2014, 52, Weekday::Sun), Some(ymd(2014, 12, 28))); + /// assert_eq!(isoywd(2014, 53, Weekday::Mon), None); + /// assert_eq!(isoywd(2015, 1, Weekday::Mon), Some(ymd(2014, 12, 29))); + /// + /// // 2015-W52 21 22 23 24 25 26 27 has 4+ days of old year, + /// // 2015-W53 28 29 30 31 1 2 3 <- so this is the last week + /// // 2016-W01 4 5 6 7 8 9 10 + /// assert_eq!(isoywd(2015, 52, Weekday::Sun), Some(ymd(2015, 12, 27))); + /// assert_eq!(isoywd(2015, 53, Weekday::Sun), Some(ymd(2016, 1, 3))); + /// assert_eq!(isoywd(2015, 54, Weekday::Mon), None); + /// assert_eq!(isoywd(2016, 1, Weekday::Mon), Some(ymd(2016, 1, 4))); + /// ~~~~ + pub fn from_isoywd_opt(year: i32, week: u32, weekday: Weekday) -> Option { + let flags = YearFlags::from_year(year); + let nweeks = flags.nisoweeks(); + if 1 <= week && week <= nweeks { + // ordinal = week ordinal - delta + let weekord = week * 7 + weekday as u32; + let delta = flags.isoweek_delta(); + if weekord <= delta { // ordinal < 1, previous year + let prevflags = YearFlags::from_year(year - 1); + NaiveDate::from_of(year - 1, Of::new(weekord + prevflags.ndays() - delta, + prevflags)) + } else { + let ordinal = weekord - delta; + let ndays = flags.ndays(); + if ordinal <= ndays { // this year + NaiveDate::from_of(year, Of::new(ordinal, flags)) + } else { // ordinal > ndays, next year + let nextflags = YearFlags::from_year(year + 1); + NaiveDate::from_of(year + 1, Of::new(ordinal - ndays, nextflags)) + } + } + } else { + None + } + } + + /// Makes a new `NaiveDate` from the number of days since January 1, 1 (Day 1) + /// in the proleptic Gregorian calendar. 
+ /// + /// Panics on the out-of-range date. + /// + /// # Example + /// + /// ~~~~ + /// use chrono::{NaiveDate, Datelike, Weekday}; + /// + /// let d = NaiveDate::from_num_days_from_ce(735671); + /// assert_eq!(d.num_days_from_ce(), 735671); // days since January 1, 1 CE + /// assert_eq!(d.year(), 2015); + /// assert_eq!(d.month(), 3); + /// assert_eq!(d.day(), 14); + /// assert_eq!(d.ordinal(), 73); // day of year + /// assert_eq!(d.isoweekdate(), (2015, 11, Weekday::Sat)); // ISO week and weekday + /// ~~~~ + /// + /// While not directly supported by Chrono, + /// it is easy to convert from the Julian day number + /// (January 1, 4713 BCE in the *Julian* calendar being Day 0) + /// to Gregorian with this method. + /// (Note that this panics when `jd` is out of range.) + /// + /// ~~~~ + /// use chrono::NaiveDate; + /// + /// fn jd_to_date(jd: i32) -> NaiveDate { + /// // keep in mind that the Julian day number is 0-based + /// // while this method requires an 1-based number. + /// NaiveDate::from_num_days_from_ce(jd - 1721425) + /// } + /// + /// // January 1, 4713 BCE in Julian = November 24, 4714 BCE in Gregorian + /// assert_eq!(jd_to_date(0), NaiveDate::from_ymd(-4713, 11, 24)); + /// + /// assert_eq!(jd_to_date(1721426), NaiveDate::from_ymd(1, 1, 1)); + /// assert_eq!(jd_to_date(2450000), NaiveDate::from_ymd(1995, 10, 9)); + /// assert_eq!(jd_to_date(2451545), NaiveDate::from_ymd(2000, 1, 1)); + /// ~~~~ + #[inline] + pub fn from_num_days_from_ce(days: i32) -> NaiveDate { + NaiveDate::from_num_days_from_ce_opt(days).expect("out-of-range date") + } + + /// Makes a new `NaiveDate` from the number of days since January 1, 1 (Day 1) + /// in the proleptic Gregorian calendar. + /// + /// Returns `None` on the out-of-range date. 
+ /// + /// # Example + /// + /// ~~~~ + /// use chrono::NaiveDate; + /// + /// let days = |ndays| NaiveDate::from_num_days_from_ce_opt(ndays); + /// assert_eq!(days(730000), Some(NaiveDate::from_ymd(1999, 9, 3))); + /// assert_eq!(days(1), Some(NaiveDate::from_ymd(1, 1, 1))); + /// assert_eq!(days(0), Some(NaiveDate::from_ymd(0, 12, 31))); + /// assert_eq!(days(-1), Some(NaiveDate::from_ymd(0, 12, 30))); + /// assert_eq!(days(100000000), None); + /// assert_eq!(days(-100000000), None); + /// ~~~~ + pub fn from_num_days_from_ce_opt(days: i32) -> Option { + let days = days + 365; // make December 31, 1 BCE equal to day 0 + let (year_div_400, cycle) = div_mod_floor(days, 146097); + let (year_mod_400, ordinal) = internals::cycle_to_yo(cycle as u32); + let flags = YearFlags::from_year_mod_400(year_mod_400 as i32); + NaiveDate::from_of(year_div_400 * 400 + year_mod_400 as i32, + Of::new(ordinal, flags)) + } + + /// Parses a string with the specified format string and returns a new `NaiveDate`. + /// See the [`format::strftime` module](../../format/strftime/index.html) + /// on the supported escape sequences. + /// + /// # Example + /// + /// ~~~~ + /// use chrono::NaiveDate; + /// + /// assert_eq!(NaiveDate::parse_from_str("2015-09-05", "%Y-%m-%d"), + /// Ok(NaiveDate::from_ymd(2015, 9, 5))); + /// assert_eq!(NaiveDate::parse_from_str("5sep2015", "%d%b%Y"), + /// Ok(NaiveDate::from_ymd(2015, 9, 5))); + /// ~~~~ + /// + /// Time and offset is ignored for the purpose of parsing. + /// + /// ~~~~ + /// # use chrono::NaiveDate; + /// assert_eq!(NaiveDate::parse_from_str("2014-5-17T12:34:56+09:30", "%Y-%m-%dT%H:%M:%S%z"), + /// Ok(NaiveDate::from_ymd(2014, 5, 17))); + /// ~~~~ + /// + /// Out-of-bound dates or insufficient fields are errors. 
+ /// + /// ~~~~ + /// # use chrono::NaiveDate; + /// assert!(NaiveDate::parse_from_str("2015/9", "%Y/%m").is_err()); + /// assert!(NaiveDate::parse_from_str("2015/9/31", "%Y/%m/%d").is_err()); + /// ~~~~ + /// + /// All parsed fields should be consistent to each other, otherwise it's an error. + /// + /// ~~~~ + /// # use chrono::NaiveDate; + /// assert!(NaiveDate::parse_from_str("Sat, 09 Aug 2013", "%a, %d %b %Y").is_err()); + /// ~~~~ + pub fn parse_from_str(s: &str, fmt: &str) -> ParseResult { + let mut parsed = Parsed::new(); + try!(parse(&mut parsed, s, StrftimeItems::new(fmt))); + parsed.to_naive_date() + } + + /// Makes a new `NaiveDateTime` from the current date and given `NaiveTime`. + /// + /// # Example + /// + /// ~~~~ + /// use chrono::{NaiveDate, NaiveTime, NaiveDateTime}; + /// + /// let d = NaiveDate::from_ymd(2015, 6, 3); + /// let t = NaiveTime::from_hms_milli(12, 34, 56, 789); + /// + /// let dt: NaiveDateTime = d.and_time(t); + /// assert_eq!(dt.date(), d); + /// assert_eq!(dt.time(), t); + /// ~~~~ + #[inline] + pub fn and_time(&self, time: NaiveTime) -> NaiveDateTime { + NaiveDateTime::new(self.clone(), time) + } + + /// Makes a new `NaiveDateTime` from the current date, hour, minute and second. + /// + /// No [leap second](../time/index.html#leap-second-handling) is allowed here; + /// use `NaiveDate::and_hms_*` methods with a subsecond parameter instead. + /// + /// Panics on invalid hour, minute and/or second. 
+ /// + /// # Example + /// + /// ~~~~ + /// use chrono::{NaiveDate, NaiveDateTime, Datelike, Timelike, Weekday}; + /// + /// let d = NaiveDate::from_ymd(2015, 6, 3); + /// + /// let dt: NaiveDateTime = d.and_hms(12, 34, 56); + /// assert_eq!(dt.year(), 2015); + /// assert_eq!(dt.weekday(), Weekday::Wed); + /// assert_eq!(dt.second(), 56); + /// ~~~~ + #[inline] + pub fn and_hms(&self, hour: u32, min: u32, sec: u32) -> NaiveDateTime { + self.and_hms_opt(hour, min, sec).expect("invalid time") + } + + /// Makes a new `NaiveDateTime` from the current date, hour, minute and second. + /// + /// No [leap second](../time/index.html#leap-second-handling) is allowed here; + /// use `NaiveDate::and_hms_*_opt` methods with a subsecond parameter instead. + /// + /// Returns `None` on invalid hour, minute and/or second. + /// + /// # Example + /// + /// ~~~~ + /// use chrono::NaiveDate; + /// + /// let d = NaiveDate::from_ymd(2015, 6, 3); + /// assert!(d.and_hms_opt(12, 34, 56).is_some()); + /// assert!(d.and_hms_opt(12, 34, 60).is_none()); // use `and_hms_milli_opt` instead + /// assert!(d.and_hms_opt(12, 60, 56).is_none()); + /// assert!(d.and_hms_opt(24, 34, 56).is_none()); + /// ~~~~ + #[inline] + pub fn and_hms_opt(&self, hour: u32, min: u32, sec: u32) -> Option { + NaiveTime::from_hms_opt(hour, min, sec).map(|time| self.and_time(time)) + } + + /// Makes a new `NaiveDateTime` from the current date, hour, minute, second and millisecond. + /// + /// The millisecond part can exceed 1,000 + /// in order to represent the [leap second](../time/index.html#leap-second-handling). + /// + /// Panics on invalid hour, minute, second and/or millisecond. 
+ /// + /// # Example + /// + /// ~~~~ + /// use chrono::{NaiveDate, NaiveDateTime, Datelike, Timelike, Weekday}; + /// + /// let d = NaiveDate::from_ymd(2015, 6, 3); + /// + /// let dt: NaiveDateTime = d.and_hms_milli(12, 34, 56, 789); + /// assert_eq!(dt.year(), 2015); + /// assert_eq!(dt.weekday(), Weekday::Wed); + /// assert_eq!(dt.second(), 56); + /// assert_eq!(dt.nanosecond(), 789_000_000); + /// ~~~~ + #[inline] + pub fn and_hms_milli(&self, hour: u32, min: u32, sec: u32, milli: u32) -> NaiveDateTime { + self.and_hms_milli_opt(hour, min, sec, milli).expect("invalid time") + } + + /// Makes a new `NaiveDateTime` from the current date, hour, minute, second and millisecond. + /// + /// The millisecond part can exceed 1,000 + /// in order to represent the [leap second](../time/index.html#leap-second-handling). + /// + /// Returns `None` on invalid hour, minute, second and/or millisecond. + /// + /// # Example + /// + /// ~~~~ + /// use chrono::NaiveDate; + /// + /// let d = NaiveDate::from_ymd(2015, 6, 3); + /// assert!(d.and_hms_milli_opt(12, 34, 56, 789).is_some()); + /// assert!(d.and_hms_milli_opt(12, 34, 59, 1_789).is_some()); // leap second + /// assert!(d.and_hms_milli_opt(12, 34, 59, 2_789).is_none()); + /// assert!(d.and_hms_milli_opt(12, 34, 60, 789).is_none()); + /// assert!(d.and_hms_milli_opt(12, 60, 56, 789).is_none()); + /// assert!(d.and_hms_milli_opt(24, 34, 56, 789).is_none()); + /// ~~~~ + #[inline] + pub fn and_hms_milli_opt(&self, hour: u32, min: u32, sec: u32, + milli: u32) -> Option { + NaiveTime::from_hms_milli_opt(hour, min, sec, milli).map(|time| self.and_time(time)) + } + + /// Makes a new `NaiveDateTime` from the current date, hour, minute, second and microsecond. + /// + /// The microsecond part can exceed 1,000,000 + /// in order to represent the [leap second](../time/index.html#leap-second-handling). + /// + /// Panics on invalid hour, minute, second and/or microsecond. 
+ /// + /// # Example + /// + /// ~~~~ + /// use chrono::{NaiveDate, NaiveDateTime, Datelike, Timelike, Weekday}; + /// + /// let d = NaiveDate::from_ymd(2015, 6, 3); + /// + /// let dt: NaiveDateTime = d.and_hms_micro(12, 34, 56, 789_012); + /// assert_eq!(dt.year(), 2015); + /// assert_eq!(dt.weekday(), Weekday::Wed); + /// assert_eq!(dt.second(), 56); + /// assert_eq!(dt.nanosecond(), 789_012_000); + /// ~~~~ + #[inline] + pub fn and_hms_micro(&self, hour: u32, min: u32, sec: u32, micro: u32) -> NaiveDateTime { + self.and_hms_micro_opt(hour, min, sec, micro).expect("invalid time") + } + + /// Makes a new `NaiveDateTime` from the current date, hour, minute, second and microsecond. + /// + /// The microsecond part can exceed 1,000,000 + /// in order to represent the [leap second](../time/index.html#leap-second-handling). + /// + /// Returns `None` on invalid hour, minute, second and/or microsecond. + /// + /// # Example + /// + /// ~~~~ + /// use chrono::NaiveDate; + /// + /// let d = NaiveDate::from_ymd(2015, 6, 3); + /// assert!(d.and_hms_micro_opt(12, 34, 56, 789_012).is_some()); + /// assert!(d.and_hms_micro_opt(12, 34, 59, 1_789_012).is_some()); // leap second + /// assert!(d.and_hms_micro_opt(12, 34, 59, 2_789_012).is_none()); + /// assert!(d.and_hms_micro_opt(12, 34, 60, 789_012).is_none()); + /// assert!(d.and_hms_micro_opt(12, 60, 56, 789_012).is_none()); + /// assert!(d.and_hms_micro_opt(24, 34, 56, 789_012).is_none()); + /// ~~~~ + #[inline] + pub fn and_hms_micro_opt(&self, hour: u32, min: u32, sec: u32, + micro: u32) -> Option { + NaiveTime::from_hms_micro_opt(hour, min, sec, micro).map(|time| self.and_time(time)) + } + + /// Makes a new `NaiveDateTime` from the current date, hour, minute, second and nanosecond. + /// + /// The nanosecond part can exceed 1,000,000,000 + /// in order to represent the [leap second](../time/index.html#leap-second-handling). + /// + /// Panics on invalid hour, minute, second and/or nanosecond. 
+ /// + /// # Example + /// + /// ~~~~ + /// use chrono::{NaiveDate, NaiveDateTime, Datelike, Timelike, Weekday}; + /// + /// let d = NaiveDate::from_ymd(2015, 6, 3); + /// + /// let dt: NaiveDateTime = d.and_hms_nano(12, 34, 56, 789_012_345); + /// assert_eq!(dt.year(), 2015); + /// assert_eq!(dt.weekday(), Weekday::Wed); + /// assert_eq!(dt.second(), 56); + /// assert_eq!(dt.nanosecond(), 789_012_345); + /// ~~~~ + #[inline] + pub fn and_hms_nano(&self, hour: u32, min: u32, sec: u32, nano: u32) -> NaiveDateTime { + self.and_hms_nano_opt(hour, min, sec, nano).expect("invalid time") + } + + /// Makes a new `NaiveDateTime` from the current date, hour, minute, second and nanosecond. + /// + /// The nanosecond part can exceed 1,000,000,000 + /// in order to represent the [leap second](../time/index.html#leap-second-handling). + /// + /// Returns `None` on invalid hour, minute, second and/or nanosecond. + /// + /// # Example + /// + /// ~~~~ + /// use chrono::NaiveDate; + /// + /// let d = NaiveDate::from_ymd(2015, 6, 3); + /// assert!(d.and_hms_nano_opt(12, 34, 56, 789_012_345).is_some()); + /// assert!(d.and_hms_nano_opt(12, 34, 59, 1_789_012_345).is_some()); // leap second + /// assert!(d.and_hms_nano_opt(12, 34, 59, 2_789_012_345).is_none()); + /// assert!(d.and_hms_nano_opt(12, 34, 60, 789_012_345).is_none()); + /// assert!(d.and_hms_nano_opt(12, 60, 56, 789_012_345).is_none()); + /// assert!(d.and_hms_nano_opt(24, 34, 56, 789_012_345).is_none()); + /// ~~~~ + #[inline] + pub fn and_hms_nano_opt(&self, hour: u32, min: u32, sec: u32, + nano: u32) -> Option { + NaiveTime::from_hms_nano_opt(hour, min, sec, nano).map(|time| self.and_time(time)) + } + + /// Returns the packed month-day-flags. + #[inline] + fn mdf(&self) -> Mdf { + self.of().to_mdf() + } + + /// Returns the packed ordinal-flags. + #[inline] + fn of(&self) -> Of { + Of((self.ymdf & 0b1111_11111_1111) as u32) + } + + /// Makes a new `NaiveDate` with the packed month-day-flags changed. 
+ /// + /// Returns `None` when the resulting `NaiveDate` would be invalid. + #[inline] + fn with_mdf(&self, mdf: Mdf) -> Option { + self.with_of(mdf.to_of()) + } + + /// Makes a new `NaiveDate` with the packed ordinal-flags changed. + /// + /// Returns `None` when the resulting `NaiveDate` would be invalid. + #[inline] + fn with_of(&self, of: Of) -> Option { + if of.valid() { + let Of(of) = of; + Some(NaiveDate { ymdf: (self.ymdf & !0b111111111_1111) | of as DateImpl }) + } else { + None + } + } + + /// Makes a new `NaiveDate` for the next calendar date. + /// + /// Panics when `self` is the last representable date. + /// + /// # Example + /// + /// ~~~~ + /// use chrono::NaiveDate; + /// + /// assert_eq!(NaiveDate::from_ymd(2015, 6, 3).succ(), NaiveDate::from_ymd(2015, 6, 4)); + /// assert_eq!(NaiveDate::from_ymd(2015, 6, 30).succ(), NaiveDate::from_ymd(2015, 7, 1)); + /// assert_eq!(NaiveDate::from_ymd(2015, 12, 31).succ(), NaiveDate::from_ymd(2016, 1, 1)); + /// ~~~~ + #[inline] + pub fn succ(&self) -> NaiveDate { + self.succ_opt().expect("out of bound") + } + + /// Makes a new `NaiveDate` for the next calendar date. + /// + /// Returns `None` when `self` is the last representable date. + /// + /// # Example + /// + /// ~~~~ + /// use chrono::NaiveDate; + /// use chrono::naive::date::MAX; + /// + /// assert_eq!(NaiveDate::from_ymd(2015, 6, 3).succ_opt(), + /// Some(NaiveDate::from_ymd(2015, 6, 4))); + /// assert_eq!(MAX.succ_opt(), None); + /// ~~~~ + #[inline] + pub fn succ_opt(&self) -> Option { + self.with_of(self.of().succ()).or_else(|| NaiveDate::from_ymd_opt(self.year() + 1, 1, 1)) + } + + /// Makes a new `NaiveDate` for the previous calendar date. + /// + /// Panics when `self` is the first representable date. 
+ /// + /// # Example + /// + /// ~~~~ + /// use chrono::NaiveDate; + /// + /// assert_eq!(NaiveDate::from_ymd(2015, 6, 3).pred(), NaiveDate::from_ymd(2015, 6, 2)); + /// assert_eq!(NaiveDate::from_ymd(2015, 6, 1).pred(), NaiveDate::from_ymd(2015, 5, 31)); + /// assert_eq!(NaiveDate::from_ymd(2015, 1, 1).pred(), NaiveDate::from_ymd(2014, 12, 31)); + /// ~~~~ + #[inline] + pub fn pred(&self) -> NaiveDate { + self.pred_opt().expect("out of bound") + } + + /// Makes a new `NaiveDate` for the previous calendar date. + /// + /// Returns `None` when `self` is the first representable date. + /// + /// # Example + /// + /// ~~~~ + /// use chrono::NaiveDate; + /// use chrono::naive::date::MIN; + /// + /// assert_eq!(NaiveDate::from_ymd(2015, 6, 3).pred_opt(), + /// Some(NaiveDate::from_ymd(2015, 6, 2))); + /// assert_eq!(MIN.pred_opt(), None); + /// ~~~~ + #[inline] + pub fn pred_opt(&self) -> Option { + self.with_of(self.of().pred()).or_else(|| NaiveDate::from_ymd_opt(self.year() - 1, 12, 31)) + } + + /// Adds the `days` part of given `Duration` to the current date. + /// + /// Returns `None` when it will result in overflow. 
+ /// + /// # Example + /// + /// ~~~~ + /// use chrono::{NaiveDate, Duration}; + /// use chrono::naive::date::MAX; + /// + /// let d = NaiveDate::from_ymd(2015, 9, 5); + /// assert_eq!(d.checked_add(Duration::days(40)), Some(NaiveDate::from_ymd(2015, 10, 15))); + /// assert_eq!(d.checked_add(Duration::days(-40)), Some(NaiveDate::from_ymd(2015, 7, 27))); + /// assert_eq!(d.checked_add(Duration::days(1000_000_000)), None); + /// assert_eq!(d.checked_add(Duration::days(-1000_000_000)), None); + /// assert_eq!(MAX.checked_add(Duration::days(1)), None); + /// ~~~~ + pub fn checked_add(self, rhs: Duration) -> Option { + let year = self.year(); + let (mut year_div_400, year_mod_400) = div_mod_floor(year, 400); + let cycle = internals::yo_to_cycle(year_mod_400 as u32, self.of().ordinal()); + let cycle = try_opt!((cycle as i32).checked_add(try_opt!(rhs.num_days().to_i32()))); + let (cycle_div_400y, cycle) = div_mod_floor(cycle, 146097); + year_div_400 += cycle_div_400y; + + let (year_mod_400, ordinal) = internals::cycle_to_yo(cycle as u32); + let flags = YearFlags::from_year_mod_400(year_mod_400 as i32); + NaiveDate::from_of(year_div_400 * 400 + year_mod_400 as i32, + Of::new(ordinal, flags)) + } + + /// Subtracts the `days` part of given `Duration` from the current date. + /// + /// Returns `None` when it will result in overflow. 
+ /// + /// # Example + /// + /// ~~~~ + /// use chrono::{NaiveDate, Duration}; + /// use chrono::naive::date::MIN; + /// + /// let d = NaiveDate::from_ymd(2015, 9, 5); + /// assert_eq!(d.checked_sub(Duration::days(40)), Some(NaiveDate::from_ymd(2015, 7, 27))); + /// assert_eq!(d.checked_sub(Duration::days(-40)), Some(NaiveDate::from_ymd(2015, 10, 15))); + /// assert_eq!(d.checked_sub(Duration::days(1000_000_000)), None); + /// assert_eq!(d.checked_sub(Duration::days(-1000_000_000)), None); + /// assert_eq!(MIN.checked_sub(Duration::days(1)), None); + /// ~~~~ + pub fn checked_sub(self, rhs: Duration) -> Option { + let year = self.year(); + let (mut year_div_400, year_mod_400) = div_mod_floor(year, 400); + let cycle = internals::yo_to_cycle(year_mod_400 as u32, self.of().ordinal()); + let cycle = try_opt!((cycle as i32).checked_sub(try_opt!(rhs.num_days().to_i32()))); + let (cycle_div_400y, cycle) = div_mod_floor(cycle, 146097); + year_div_400 += cycle_div_400y; + + let (year_mod_400, ordinal) = internals::cycle_to_yo(cycle as u32); + let flags = YearFlags::from_year_mod_400(year_mod_400 as i32); + NaiveDate::from_of(year_div_400 * 400 + year_mod_400 as i32, + Of::new(ordinal, flags)) + } + + /// Formats the date with the specified formatting items. + /// Otherwise it is same to the ordinary `format` method. + /// + /// The `Iterator` of items should be `Clone`able, + /// since the resulting `DelayedFormat` value may be formatted multiple times. 
+ /// + /// # Example + /// + /// ~~~~ + /// use chrono::NaiveDate; + /// use chrono::format::strftime::StrftimeItems; + /// + /// let fmt = StrftimeItems::new("%Y-%m-%d"); + /// let d = NaiveDate::from_ymd(2015, 9, 5); + /// assert_eq!(d.format_with_items(fmt.clone()).to_string(), "2015-09-05"); + /// assert_eq!(d.format("%Y-%m-%d").to_string(), "2015-09-05"); + /// ~~~~ + #[inline] + pub fn format_with_items<'a, I>(&self, items: I) -> DelayedFormat + where I: Iterator> + Clone { + DelayedFormat::new(Some(self.clone()), None, items) + } + + /// Formats the date with the specified format string. + /// See the [`format::strftime` module](../../format/strftime/index.html) + /// on the supported escape sequences. + /// + /// This returns a `DelayedFormat`, + /// which gets converted to a string only when actual formatting happens. + /// You may use the `to_string` method to get a `String`, + /// or just feed it into `print!` and other formatting macros. + /// (In this way it avoids the redundant memory allocation.) + /// + /// A wrong format string does *not* issue an error immediately. + /// Rather, converting or formatting the `DelayedFormat` fails. + /// You are recommended to immediately use `DelayedFormat` for this reason. + /// + /// # Example + /// + /// ~~~~ + /// use chrono::NaiveDate; + /// + /// let d = NaiveDate::from_ymd(2015, 9, 5); + /// assert_eq!(d.format("%Y-%m-%d").to_string(), "2015-09-05"); + /// assert_eq!(d.format("%A, %-d %B, %C%y").to_string(), "Saturday, 5 September, 2015"); + /// ~~~~ + #[inline] + pub fn format<'a>(&self, fmt: &'a str) -> DelayedFormat> { + self.format_with_items(StrftimeItems::new(fmt)) + } +} + +impl Datelike for NaiveDate { + /// Returns the year number in the [calendar date](./index.html#calendar-date). 
+ /// + /// # Example + /// + /// ~~~~ + /// use chrono::{NaiveDate, Datelike}; + /// + /// assert_eq!(NaiveDate::from_ymd(2015, 9, 8).year(), 2015); + /// assert_eq!(NaiveDate::from_ymd(-308, 3, 14).year(), -308); // 309 BCE + /// ~~~~ + #[inline] + fn year(&self) -> i32 { + self.ymdf >> 13 + } + + /// Returns the month number starting from 1. + /// + /// The return value ranges from 1 to 12. + /// + /// # Example + /// + /// ~~~~ + /// use chrono::{NaiveDate, Datelike}; + /// + /// assert_eq!(NaiveDate::from_ymd(2015, 9, 8).month(), 9); + /// assert_eq!(NaiveDate::from_ymd(-308, 3, 14).month(), 3); + /// ~~~~ + #[inline] + fn month(&self) -> u32 { + self.mdf().month() + } + + /// Returns the month number starting from 0. + /// + /// The return value ranges from 0 to 11. + /// + /// # Example + /// + /// ~~~~ + /// use chrono::{NaiveDate, Datelike}; + /// + /// assert_eq!(NaiveDate::from_ymd(2015, 9, 8).month0(), 8); + /// assert_eq!(NaiveDate::from_ymd(-308, 3, 14).month0(), 2); + /// ~~~~ + #[inline] + fn month0(&self) -> u32 { + self.mdf().month() - 1 + } + + /// Returns the day of month starting from 1. + /// + /// The return value ranges from 1 to 31. (The last day of month differs by months.) + /// + /// # Example + /// + /// ~~~~ + /// use chrono::{NaiveDate, Datelike}; + /// + /// assert_eq!(NaiveDate::from_ymd(2015, 9, 8).day(), 8); + /// assert_eq!(NaiveDate::from_ymd(-308, 3, 14).day(), 14); + /// ~~~~ + /// + /// Combined with [`NaiveDate::pred`](./struct.NaiveDate.html#method.pred), + /// one can determine the number of days in a particular month. + /// (Note that this panics when `year` is out of range.) + /// + /// ~~~~ + /// use chrono::{NaiveDate, Datelike}; + /// + /// fn ndays_in_month(year: i32, month: u32) -> u32 { + /// // the first day of the next month... 
+ /// let (y, m) = if month == 12 { (year + 1, 1) } else { (year, month + 1) }; + /// let d = NaiveDate::from_ymd(y, m, 1); + /// + /// // ...is preceded by the last day of the original month + /// d.pred().day() + /// } + /// + /// assert_eq!(ndays_in_month(2015, 8), 31); + /// assert_eq!(ndays_in_month(2015, 9), 30); + /// assert_eq!(ndays_in_month(2015, 12), 31); + /// assert_eq!(ndays_in_month(2016, 2), 29); + /// assert_eq!(ndays_in_month(2017, 2), 28); + /// ~~~~ + #[inline] + fn day(&self) -> u32 { + self.mdf().day() + } + + /// Returns the day of month starting from 0. + /// + /// The return value ranges from 0 to 30. (The last day of month differs by months.) + /// + /// # Example + /// + /// ~~~~ + /// use chrono::{NaiveDate, Datelike}; + /// + /// assert_eq!(NaiveDate::from_ymd(2015, 9, 8).day0(), 7); + /// assert_eq!(NaiveDate::from_ymd(-308, 3, 14).day0(), 13); + /// ~~~~ + #[inline] + fn day0(&self) -> u32 { + self.mdf().day() - 1 + } + + /// Returns the day of year starting from 1. + /// + /// The return value ranges from 1 to 366. (The last day of year differs by years.) + /// + /// # Example + /// + /// ~~~~ + /// use chrono::{NaiveDate, Datelike}; + /// + /// assert_eq!(NaiveDate::from_ymd(2015, 9, 8).ordinal(), 251); + /// assert_eq!(NaiveDate::from_ymd(-308, 3, 14).ordinal(), 74); + /// ~~~~ + /// + /// Combined with [`NaiveDate::pred`](./struct.NaiveDate.html#method.pred), + /// one can determine the number of days in a particular year. + /// (Note that this panics when `year` is out of range.) + /// + /// ~~~~ + /// use chrono::{NaiveDate, Datelike}; + /// + /// fn ndays_in_year(year: i32) -> u32 { + /// // the first day of the next year... 
+ /// let d = NaiveDate::from_ymd(year + 1, 1, 1); + /// + /// // ...is preceded by the last day of the original year + /// d.pred().ordinal() + /// } + /// + /// assert_eq!(ndays_in_year(2015), 365); + /// assert_eq!(ndays_in_year(2016), 366); + /// assert_eq!(ndays_in_year(2017), 365); + /// assert_eq!(ndays_in_year(2000), 366); + /// assert_eq!(ndays_in_year(2100), 365); + /// ~~~~ + #[inline] + fn ordinal(&self) -> u32 { + self.of().ordinal() + } + + /// Returns the day of year starting from 0. + /// + /// The return value ranges from 0 to 365. (The last day of year differs by years.) + /// + /// # Example + /// + /// ~~~~ + /// use chrono::{NaiveDate, Datelike}; + /// + /// assert_eq!(NaiveDate::from_ymd(2015, 9, 8).ordinal0(), 250); + /// assert_eq!(NaiveDate::from_ymd(-308, 3, 14).ordinal0(), 73); + /// ~~~~ + #[inline] + fn ordinal0(&self) -> u32 { + self.of().ordinal() - 1 + } + + /// Returns the day of week. + /// + /// # Example + /// + /// ~~~~ + /// use chrono::{NaiveDate, Datelike, Weekday}; + /// + /// assert_eq!(NaiveDate::from_ymd(2015, 9, 8).weekday(), Weekday::Tue); + /// assert_eq!(NaiveDate::from_ymd(-308, 3, 14).weekday(), Weekday::Fri); + /// ~~~~ + #[inline] + fn weekday(&self) -> Weekday { + self.of().weekday() + } + + fn isoweekdate(&self) -> (i32, u32, Weekday) { + let of = self.of(); + let year = self.year(); + let (rawweek, weekday) = of.isoweekdate_raw(); + if rawweek < 1 { // previous year + let prevlastweek = YearFlags::from_year(year - 1).nisoweeks(); + (year - 1, prevlastweek, weekday) + } else { + let lastweek = of.flags().nisoweeks(); + if rawweek > lastweek { // next year + (year + 1, 1, weekday) + } else { + (year, rawweek, weekday) + } + } + } + + /// Makes a new `NaiveDate` with the year number changed. + /// + /// Returns `None` when the resulting `NaiveDate` would be invalid. 
+ /// + /// # Example + /// + /// ~~~~ + /// use chrono::{NaiveDate, Datelike}; + /// + /// assert_eq!(NaiveDate::from_ymd(2015, 9, 8).with_year(2016), + /// Some(NaiveDate::from_ymd(2016, 9, 8))); + /// assert_eq!(NaiveDate::from_ymd(2015, 9, 8).with_year(-308), + /// Some(NaiveDate::from_ymd(-308, 9, 8))); + /// ~~~~ + /// + /// A leap day (February 29) is a good example that this method can return `None`. + /// + /// ~~~~ + /// # use chrono::{NaiveDate, Datelike}; + /// assert!(NaiveDate::from_ymd(2016, 2, 29).with_year(2015).is_none()); + /// assert!(NaiveDate::from_ymd(2016, 2, 29).with_year(2020).is_some()); + /// ~~~~ + #[inline] + fn with_year(&self, year: i32) -> Option { + // we need to operate with `mdf` since we should keep the month and day number as is + let mdf = self.mdf(); + + // adjust the flags as needed + let flags = YearFlags::from_year(year); + let mdf = mdf.with_flags(flags); + + NaiveDate::from_mdf(year, mdf) + } + + /// Makes a new `NaiveDate` with the month number (starting from 1) changed. + /// + /// Returns `None` when the resulting `NaiveDate` would be invalid. + /// + /// # Example + /// + /// ~~~~ + /// use chrono::{NaiveDate, Datelike}; + /// + /// assert_eq!(NaiveDate::from_ymd(2015, 9, 8).with_month(10), + /// Some(NaiveDate::from_ymd(2015, 10, 8))); + /// assert_eq!(NaiveDate::from_ymd(2015, 9, 8).with_month(13), None); // no month 13 + /// assert_eq!(NaiveDate::from_ymd(2015, 9, 30).with_month(2), None); // no February 30 + /// ~~~~ + #[inline] + fn with_month(&self, month: u32) -> Option { + self.with_mdf(self.mdf().with_month(month)) + } + + /// Makes a new `NaiveDate` with the month number (starting from 0) changed. + /// + /// Returns `None` when the resulting `NaiveDate` would be invalid. 
+ /// + /// # Example + /// + /// ~~~~ + /// use chrono::{NaiveDate, Datelike}; + /// + /// assert_eq!(NaiveDate::from_ymd(2015, 9, 8).with_month0(9), + /// Some(NaiveDate::from_ymd(2015, 10, 8))); + /// assert_eq!(NaiveDate::from_ymd(2015, 9, 8).with_month0(12), None); // no month 13 + /// assert_eq!(NaiveDate::from_ymd(2015, 9, 30).with_month0(1), None); // no February 30 + /// ~~~~ + #[inline] + fn with_month0(&self, month0: u32) -> Option { + self.with_mdf(self.mdf().with_month(month0 + 1)) + } + + /// Makes a new `NaiveDate` with the day of month (starting from 1) changed. + /// + /// Returns `None` when the resulting `NaiveDate` would be invalid. + /// + /// # Example + /// + /// ~~~~ + /// use chrono::{NaiveDate, Datelike}; + /// + /// assert_eq!(NaiveDate::from_ymd(2015, 9, 8).with_day(30), + /// Some(NaiveDate::from_ymd(2015, 9, 30))); + /// assert_eq!(NaiveDate::from_ymd(2015, 9, 8).with_day(31), + /// None); // no September 31 + /// ~~~~ + #[inline] + fn with_day(&self, day: u32) -> Option { + self.with_mdf(self.mdf().with_day(day)) + } + + /// Makes a new `NaiveDate` with the day of month (starting from 0) changed. + /// + /// Returns `None` when the resulting `NaiveDate` would be invalid. + /// + /// # Example + /// + /// ~~~~ + /// use chrono::{NaiveDate, Datelike}; + /// + /// assert_eq!(NaiveDate::from_ymd(2015, 9, 8).with_day0(29), + /// Some(NaiveDate::from_ymd(2015, 9, 30))); + /// assert_eq!(NaiveDate::from_ymd(2015, 9, 8).with_day0(30), + /// None); // no September 31 + /// ~~~~ + #[inline] + fn with_day0(&self, day0: u32) -> Option { + self.with_mdf(self.mdf().with_day(day0 + 1)) + } + + /// Makes a new `NaiveDate` with the day of year (starting from 1) changed. + /// + /// Returns `None` when the resulting `NaiveDate` would be invalid. 
+ /// + /// # Example + /// + /// ~~~~ + /// use chrono::{NaiveDate, Datelike}; + /// + /// assert_eq!(NaiveDate::from_ymd(2015, 1, 1).with_ordinal(60), + /// Some(NaiveDate::from_ymd(2015, 3, 1))); + /// assert_eq!(NaiveDate::from_ymd(2015, 1, 1).with_ordinal(366), + /// None); // 2015 had only 365 days + /// + /// assert_eq!(NaiveDate::from_ymd(2016, 1, 1).with_ordinal(60), + /// Some(NaiveDate::from_ymd(2016, 2, 29))); + /// assert_eq!(NaiveDate::from_ymd(2016, 1, 1).with_ordinal(366), + /// Some(NaiveDate::from_ymd(2016, 12, 31))); + /// ~~~~ + #[inline] + fn with_ordinal(&self, ordinal: u32) -> Option { + self.with_of(self.of().with_ordinal(ordinal)) + } + + /// Makes a new `NaiveDate` with the day of year (starting from 0) changed. + /// + /// Returns `None` when the resulting `NaiveDate` would be invalid. + /// + /// # Example + /// + /// ~~~~ + /// use chrono::{NaiveDate, Datelike}; + /// + /// assert_eq!(NaiveDate::from_ymd(2015, 1, 1).with_ordinal0(59), + /// Some(NaiveDate::from_ymd(2015, 3, 1))); + /// assert_eq!(NaiveDate::from_ymd(2015, 1, 1).with_ordinal0(365), + /// None); // 2015 had only 365 days + /// + /// assert_eq!(NaiveDate::from_ymd(2016, 1, 1).with_ordinal0(59), + /// Some(NaiveDate::from_ymd(2016, 2, 29))); + /// assert_eq!(NaiveDate::from_ymd(2016, 1, 1).with_ordinal0(365), + /// Some(NaiveDate::from_ymd(2016, 12, 31))); + /// ~~~~ + #[inline] + fn with_ordinal0(&self, ordinal0: u32) -> Option { + self.with_of(self.of().with_ordinal(ordinal0 + 1)) + } +} + +/// `NaiveDate` can be used as a key to the hash maps. +impl hash::Hash for NaiveDate { + fn hash(&self, state: &mut H) { + // don't need to strip flags, as we can safely assume that it is correct + self.ymdf.hash(state); + } +} + +/// An addition of `Duration` to `NaiveDate` discards the fractional days, +/// rounding to the closest integral number of days towards `Duration::zero()`. +/// +/// Panics on underflow or overflow. 
+/// Use [`NaiveDate::checked_add`](#method.checked_add) to detect that. +/// +/// # Example +/// +/// ~~~~ +/// use chrono::{NaiveDate, Duration}; +/// +/// let ymd = |y,m,d| NaiveDate::from_ymd(y, m, d); +/// assert_eq!(ymd(2014, 1, 1) + Duration::zero(), ymd(2014, 1, 1)); +/// assert_eq!(ymd(2014, 1, 1) + Duration::seconds(86399), ymd(2014, 1, 1)); +/// assert_eq!(ymd(2014, 1, 1) + Duration::seconds(-86399), ymd(2014, 1, 1)); +/// assert_eq!(ymd(2014, 1, 1) + Duration::days(1), ymd(2014, 1, 2)); +/// assert_eq!(ymd(2014, 1, 1) + Duration::days(-1), ymd(2013, 12, 31)); +/// assert_eq!(ymd(2014, 1, 1) + Duration::days(364), ymd(2014, 12, 31)); +/// assert_eq!(ymd(2014, 1, 1) + Duration::days(365*4 + 1), ymd(2018, 1, 1)); +/// assert_eq!(ymd(2014, 1, 1) + Duration::days(365*400 + 97), ymd(2414, 1, 1)); +/// ~~~~ +impl Add for NaiveDate { + type Output = NaiveDate; + + #[inline] + fn add(self, rhs: Duration) -> NaiveDate { + self.checked_add(rhs).expect("`NaiveDate + Duration` overflowed") + } +} + +/// A subtraction of `NaiveDate` from `NaiveDate` yields a `Duration` of integral numbers. +/// +/// This does not overflow or underflow at all, +/// as all possible output fits in the range of `Duration`. 
+/// +/// # Example +/// +/// ~~~~ +/// use chrono::{NaiveDate, Duration}; +/// +/// let ymd = |y,m,d| NaiveDate::from_ymd(y, m, d); +/// assert_eq!(ymd(2014, 1, 1) - ymd(2014, 1, 1), Duration::zero()); +/// assert_eq!(ymd(2014, 1, 1) - ymd(2013, 12, 31), Duration::days(1)); +/// assert_eq!(ymd(2014, 1, 1) - ymd(2014, 1, 2), Duration::days(-1)); +/// assert_eq!(ymd(2014, 1, 1) - ymd(2013, 9, 23), Duration::days(100)); +/// assert_eq!(ymd(2014, 1, 1) - ymd(2013, 1, 1), Duration::days(365)); +/// assert_eq!(ymd(2014, 1, 1) - ymd(2010, 1, 1), Duration::days(365*4 + 1)); +/// assert_eq!(ymd(2014, 1, 1) - ymd(1614, 1, 1), Duration::days(365*400 + 97)); +/// ~~~~ +impl Sub for NaiveDate { + type Output = Duration; + + fn sub(self, rhs: NaiveDate) -> Duration { + let year1 = self.year(); + let year2 = rhs.year(); + let (year1_div_400, year1_mod_400) = div_mod_floor(year1, 400); + let (year2_div_400, year2_mod_400) = div_mod_floor(year2, 400); + let cycle1 = internals::yo_to_cycle(year1_mod_400 as u32, self.of().ordinal()) as i64; + let cycle2 = internals::yo_to_cycle(year2_mod_400 as u32, rhs.of().ordinal()) as i64; + Duration::days((year1_div_400 as i64 - year2_div_400 as i64) * 146097 + (cycle1 - cycle2)) + } +} + +/// A subtraction of `Duration` from `NaiveDate` discards the fractional days, +/// rounding to the closest integral number of days towards `Duration::zero()`. +/// +/// Panics on underflow or overflow. +/// Use [`NaiveDate::checked_sub`](#method.checked_sub) to detect that. 
+/// +/// # Example +/// +/// ~~~~ +/// use chrono::{NaiveDate, Duration}; +/// +/// let ymd = |y,m,d| NaiveDate::from_ymd(y, m, d); +/// assert_eq!(ymd(2014, 1, 1) - Duration::zero(), ymd(2014, 1, 1)); +/// assert_eq!(ymd(2014, 1, 1) - Duration::seconds(86399), ymd(2014, 1, 1)); +/// assert_eq!(ymd(2014, 1, 1) - Duration::seconds(-86399), ymd(2014, 1, 1)); +/// assert_eq!(ymd(2014, 1, 1) - Duration::days(1), ymd(2013, 12, 31)); +/// assert_eq!(ymd(2014, 1, 1) - Duration::days(-1), ymd(2014, 1, 2)); +/// assert_eq!(ymd(2014, 1, 1) - Duration::days(364), ymd(2013, 1, 2)); +/// assert_eq!(ymd(2014, 1, 1) - Duration::days(365*4 + 1), ymd(2010, 1, 1)); +/// assert_eq!(ymd(2014, 1, 1) - Duration::days(365*400 + 97), ymd(1614, 1, 1)); +/// ~~~~ +impl Sub for NaiveDate { + type Output = NaiveDate; + + #[inline] + fn sub(self, rhs: Duration) -> NaiveDate { + self.checked_sub(rhs).expect("`NaiveDate - Duration` overflowed") + } +} + +/// The `Debug` output of the naive date `d` is same to `d.format("%Y-%m-%d")`. +/// +/// The string printed can be readily parsed via the `parse` method on `str`. +/// +/// # Example +/// +/// ~~~~ +/// use chrono::NaiveDate; +/// +/// assert_eq!(format!("{:?}", NaiveDate::from_ymd(2015, 9, 5)), "2015-09-05"); +/// assert_eq!(format!("{:?}", NaiveDate::from_ymd( 0, 1, 1)), "0000-01-01"); +/// assert_eq!(format!("{:?}", NaiveDate::from_ymd(9999, 12, 31)), "9999-12-31"); +/// ~~~~ +/// +/// ISO 8601 requires an explicit sign for years before 1 BCE or after 9999 CE. 
+/// +/// ~~~~ +/// # use chrono::NaiveDate; +/// assert_eq!(format!("{:?}", NaiveDate::from_ymd( -1, 1, 1)), "-0001-01-01"); +/// assert_eq!(format!("{:?}", NaiveDate::from_ymd(10000, 12, 31)), "+10000-12-31"); +/// ~~~~ +impl fmt::Debug for NaiveDate { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let year = self.year(); + let mdf = self.mdf(); + if 0 <= year && year <= 9999 { + write!(f, "{:04}-{:02}-{:02}", year, mdf.month(), mdf.day()) + } else { + // ISO 8601 requires the explicit sign for out-of-range years + write!(f, "{:+05}-{:02}-{:02}", year, mdf.month(), mdf.day()) + } + } +} + +/// The `Display` output of the naive date `d` is same to `d.format("%Y-%m-%d")`. +/// +/// The string printed can be readily parsed via the `parse` method on `str`. +/// +/// # Example +/// +/// ~~~~ +/// use chrono::NaiveDate; +/// +/// assert_eq!(format!("{}", NaiveDate::from_ymd(2015, 9, 5)), "2015-09-05"); +/// assert_eq!(format!("{}", NaiveDate::from_ymd( 0, 1, 1)), "0000-01-01"); +/// assert_eq!(format!("{}", NaiveDate::from_ymd(9999, 12, 31)), "9999-12-31"); +/// ~~~~ +/// +/// ISO 8601 requires an explicit sign for years before 1 BCE or after 9999 CE. +/// +/// ~~~~ +/// # use chrono::NaiveDate; +/// assert_eq!(format!("{}", NaiveDate::from_ymd( -1, 1, 1)), "-0001-01-01"); +/// assert_eq!(format!("{}", NaiveDate::from_ymd(10000, 12, 31)), "+10000-12-31"); +/// ~~~~ +impl fmt::Display for NaiveDate { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { fmt::Debug::fmt(self, f) } +} + +/// Parsing a str into a `NaiveDate` uses the same format, `%Y-%m-%d`, as `Debug` and `Display`. 
+/// +/// # Example +/// +/// ~~~~ +/// use chrono::NaiveDate; +/// +/// let d = NaiveDate::from_ymd(2015, 9, 18); +/// assert_eq!(format!("{}", d).parse::(), Ok(d)); +/// +/// let d = NaiveDate::from_ymd(12345, 6, 7); +/// assert_eq!(format!("{}", d).parse::(), Ok(d)); +/// +/// assert!("foo".parse::().is_err()); +/// ~~~~ +impl str::FromStr for NaiveDate { + type Err = ParseError; + + fn from_str(s: &str) -> ParseResult { + const ITEMS: &'static [Item<'static>] = &[ + Item::Space(""), Item::Numeric(Numeric::Year, Pad::Zero), + Item::Space(""), Item::Literal("-"), + Item::Space(""), Item::Numeric(Numeric::Month, Pad::Zero), + Item::Space(""), Item::Literal("-"), + Item::Space(""), Item::Numeric(Numeric::Day, Pad::Zero), + Item::Space(""), + ]; + + let mut parsed = Parsed::new(); + try!(parse(&mut parsed, s, ITEMS.iter().cloned())); + parsed.to_naive_date() + } +} + +#[cfg(feature = "rustc-serialize")] +mod rustc_serialize { + use super::NaiveDate; + use rustc_serialize::{Encodable, Encoder, Decodable, Decoder}; + + // TODO the current serialization format is NEVER intentionally defined. + // this basically follows the automatically generated implementation for those traits, + // plus manual verification steps for avoiding security problem. + // in the future it is likely to be redefined to more sane and reasonable format. 
+ + impl Encodable for NaiveDate { + fn encode(&self, s: &mut S) -> Result<(), S::Error> { + let ymdf = self.to_serialized(); + s.emit_struct("NaiveDate", 1, |s| { + try!(s.emit_struct_field("ymdf", 0, |s| ymdf.encode(s))); + Ok(()) + }) + } + } + + impl Decodable for NaiveDate { + fn decode(d: &mut D) -> Result { + d.read_struct("NaiveDate", 1, |d| { + let ymdf = try!(d.read_struct_field("ymdf", 0, Decodable::decode)); + NaiveDate::from_serialized(ymdf).ok_or_else(|| d.error("invalid date")) + }) + } + } + + #[test] + fn test_encodable() { + use rustc_serialize::json::encode; + + assert_eq!(encode(&NaiveDate::from_ymd(2016, 7, 8)).ok(), + Some(r#"{"ymdf":16518115}"#.into())); + assert_eq!(encode(&NaiveDate::from_ymd(0, 1, 1)).ok(), + Some(r#"{"ymdf":20}"#.into())); + assert_eq!(encode(&NaiveDate::from_ymd(-1, 12, 31)).ok(), + Some(r#"{"ymdf":-2341}"#.into())); + assert_eq!(encode(&super::MIN).ok(), + Some(r#"{"ymdf":-2147483625}"#.into())); + assert_eq!(encode(&super::MAX).ok(), + Some(r#"{"ymdf":2147481311}"#.into())); + } + + #[test] + fn test_decodable() { + use rustc_serialize::json; + use std::{i32, i64}; + + let decode = |s: &str| json::decode::(s); + + assert_eq!(decode(r#"{"ymdf":16518115}"#).ok(), Some(NaiveDate::from_ymd(2016, 7, 8))); + assert_eq!(decode(r#"{"ymdf":20}"#).ok(), Some(NaiveDate::from_ymd(0, 1, 1))); + assert_eq!(decode(r#"{"ymdf":-2341}"#).ok(), Some(NaiveDate::from_ymd(-1, 12, 31))); + assert_eq!(decode(r#"{"ymdf":-2147483625}"#).ok(), Some(super::MIN)); + assert_eq!(decode(r#"{"ymdf":2147481311}"#).ok(), Some(super::MAX)); + + // some extreme values and zero are always invalid + assert!(decode(r#"{"ymdf":0}"#).is_err()); + assert!(decode(r#"{"ymdf":1}"#).is_err()); + assert!(decode(r#"{"ymdf":-1}"#).is_err()); + assert!(decode(&format!(r#"{{"ymdf":{}}}"#, i32::MIN)).is_err()); + assert!(decode(&format!(r#"{{"ymdf":{}}}"#, i32::MAX)).is_err()); + assert!(decode(&format!(r#"{{"ymdf":{}}}"#, i64::MIN)).is_err()); + 
assert!(decode(&format!(r#"{{"ymdf":{}}}"#, i64::MAX)).is_err()); + + // bad formats + assert!(decode(r#"{"ymdf":20.01}"#).is_err()); + assert!(decode(r#"{"ymdf":"string"}"#).is_err()); + assert!(decode(r#"{"ymdf":null}"#).is_err()); + assert!(decode(r#"{}"#).is_err()); + assert!(decode(r#"{"date":20}"#).is_err()); + assert!(decode(r#"20"#).is_err()); + assert!(decode(r#""string""#).is_err()); + assert!(decode(r#""2016-07-08""#).is_err()); // :( + assert!(decode(r#"null"#).is_err()); + } +} + +#[cfg(feature = "serde")] +mod serde { + use super::NaiveDate; + use serde::{ser, de}; + + // TODO not very optimized for space (binary formats would want something better) + + impl ser::Serialize for NaiveDate { + fn serialize(&self, serializer: &mut S) -> Result<(), S::Error> + where S: ser::Serializer + { + serializer.serialize_str(&format!("{:?}", self)) + } + } + + struct NaiveDateVisitor; + + impl de::Visitor for NaiveDateVisitor { + type Value = NaiveDate; + + fn visit_str(&mut self, value: &str) -> Result + where E: de::Error + { + value.parse().map_err(|err| E::custom(format!("{}", err))) + } + } + + impl de::Deserialize for NaiveDate { + fn deserialize(deserializer: &mut D) -> Result + where D: de::Deserializer + { + deserializer.deserialize(NaiveDateVisitor) + } + } + + #[cfg(test)] extern crate serde_json; + + #[test] + fn test_serde_serialize() { + use self::serde_json::to_string; + + assert_eq!(to_string(&NaiveDate::from_ymd(2014, 7, 24)).ok(), + Some(r#""2014-07-24""#.into())); + assert_eq!(to_string(&NaiveDate::from_ymd(0, 1, 1)).ok(), + Some(r#""0000-01-01""#.into())); + assert_eq!(to_string(&NaiveDate::from_ymd(-1, 12, 31)).ok(), + Some(r#""-0001-12-31""#.into())); + assert_eq!(to_string(&super::MIN).ok(), + Some(r#""-262144-01-01""#.into())); + assert_eq!(to_string(&super::MAX).ok(), + Some(r#""+262143-12-31""#.into())); + } + + #[test] + fn test_serde_deserialize() { + use self::serde_json; + use std::{i32, i64}; + + let from_str = |s: &str| 
serde_json::from_str::(s); + + assert_eq!(from_str(r#""2016-07-08""#).ok(), Some(NaiveDate::from_ymd(2016, 7, 8))); + assert_eq!(from_str(r#""2016-7-8""#).ok(), Some(NaiveDate::from_ymd(2016, 7, 8))); + assert_eq!(from_str(r#""+002016-07-08""#).ok(), Some(NaiveDate::from_ymd(2016, 7, 8))); + assert_eq!(from_str(r#""0000-01-01""#).ok(), Some(NaiveDate::from_ymd(0, 1, 1))); + assert_eq!(from_str(r#""0-1-1""#).ok(), Some(NaiveDate::from_ymd(0, 1, 1))); + assert_eq!(from_str(r#""-0001-12-31""#).ok(), Some(NaiveDate::from_ymd(-1, 12, 31))); + assert_eq!(from_str(r#""-262144-01-01""#).ok(), Some(super::MIN)); + assert_eq!(from_str(r#""+262143-12-31""#).ok(), Some(super::MAX)); + + // bad formats + assert!(from_str(r#""""#).is_err()); + assert!(from_str(r#""20001231""#).is_err()); + assert!(from_str(r#""2000-00-00""#).is_err()); + assert!(from_str(r#""2000-02-30""#).is_err()); + assert!(from_str(r#""2001-02-29""#).is_err()); + assert!(from_str(r#""2002-002-28""#).is_err()); + assert!(from_str(r#""yyyy-mm-dd""#).is_err()); + assert!(from_str(r#"0"#).is_err()); + assert!(from_str(r#"20.01"#).is_err()); + assert!(from_str(&i32::MIN.to_string()).is_err()); + assert!(from_str(&i32::MAX.to_string()).is_err()); + assert!(from_str(&i64::MIN.to_string()).is_err()); + assert!(from_str(&i64::MAX.to_string()).is_err()); + assert!(from_str(r#"{}"#).is_err()); + assert!(from_str(r#"{"ymdf":20}"#).is_err()); // :( + assert!(from_str(r#"null"#).is_err()); + } +} + +#[cfg(test)] +mod tests { + use super::NaiveDate; + use super::{MIN, MIN_YEAR, MIN_DAYS_FROM_YEAR_0}; + use super::{MAX, MAX_YEAR, MAX_DAYS_FROM_YEAR_0}; + use {Datelike, Weekday}; + use duration::Duration; + use std::{i32, u32}; + + #[test] + fn test_date_from_ymd() { + let ymd_opt = |y,m,d| NaiveDate::from_ymd_opt(y, m, d); + + assert!(ymd_opt(2012, 0, 1).is_none()); + assert!(ymd_opt(2012, 1, 1).is_some()); + assert!(ymd_opt(2012, 2, 29).is_some()); + assert!(ymd_opt(2014, 2, 29).is_none()); + assert!(ymd_opt(2014, 3, 
0).is_none()); + assert!(ymd_opt(2014, 3, 1).is_some()); + assert!(ymd_opt(2014, 3, 31).is_some()); + assert!(ymd_opt(2014, 3, 32).is_none()); + assert!(ymd_opt(2014, 12, 31).is_some()); + assert!(ymd_opt(2014, 13, 1).is_none()); + } + + #[test] + fn test_date_from_yo() { + let yo_opt = |y,o| NaiveDate::from_yo_opt(y, o); + let ymd = |y,m,d| NaiveDate::from_ymd(y, m, d); + + assert_eq!(yo_opt(2012, 0), None); + assert_eq!(yo_opt(2012, 1), Some(ymd(2012, 1, 1))); + assert_eq!(yo_opt(2012, 2), Some(ymd(2012, 1, 2))); + assert_eq!(yo_opt(2012, 32), Some(ymd(2012, 2, 1))); + assert_eq!(yo_opt(2012, 60), Some(ymd(2012, 2, 29))); + assert_eq!(yo_opt(2012, 61), Some(ymd(2012, 3, 1))); + assert_eq!(yo_opt(2012, 100), Some(ymd(2012, 4, 9))); + assert_eq!(yo_opt(2012, 200), Some(ymd(2012, 7, 18))); + assert_eq!(yo_opt(2012, 300), Some(ymd(2012, 10, 26))); + assert_eq!(yo_opt(2012, 366), Some(ymd(2012, 12, 31))); + assert_eq!(yo_opt(2012, 367), None); + + assert_eq!(yo_opt(2014, 0), None); + assert_eq!(yo_opt(2014, 1), Some(ymd(2014, 1, 1))); + assert_eq!(yo_opt(2014, 2), Some(ymd(2014, 1, 2))); + assert_eq!(yo_opt(2014, 32), Some(ymd(2014, 2, 1))); + assert_eq!(yo_opt(2014, 59), Some(ymd(2014, 2, 28))); + assert_eq!(yo_opt(2014, 60), Some(ymd(2014, 3, 1))); + assert_eq!(yo_opt(2014, 100), Some(ymd(2014, 4, 10))); + assert_eq!(yo_opt(2014, 200), Some(ymd(2014, 7, 19))); + assert_eq!(yo_opt(2014, 300), Some(ymd(2014, 10, 27))); + assert_eq!(yo_opt(2014, 365), Some(ymd(2014, 12, 31))); + assert_eq!(yo_opt(2014, 366), None); + } + + #[test] + fn test_date_from_isoywd() { + let isoywd_opt = |y,w,d| NaiveDate::from_isoywd_opt(y, w, d); + let ymd = |y,m,d| NaiveDate::from_ymd(y, m, d); + + assert_eq!(isoywd_opt(2004, 0, Weekday::Sun), None); + assert_eq!(isoywd_opt(2004, 1, Weekday::Mon), Some(ymd(2003, 12, 29))); + assert_eq!(isoywd_opt(2004, 1, Weekday::Sun), Some(ymd(2004, 1, 4))); + assert_eq!(isoywd_opt(2004, 2, Weekday::Mon), Some(ymd(2004, 1, 5))); + 
assert_eq!(isoywd_opt(2004, 2, Weekday::Sun), Some(ymd(2004, 1, 11))); + assert_eq!(isoywd_opt(2004, 52, Weekday::Mon), Some(ymd(2004, 12, 20))); + assert_eq!(isoywd_opt(2004, 52, Weekday::Sun), Some(ymd(2004, 12, 26))); + assert_eq!(isoywd_opt(2004, 53, Weekday::Mon), Some(ymd(2004, 12, 27))); + assert_eq!(isoywd_opt(2004, 53, Weekday::Sun), Some(ymd(2005, 1, 2))); + assert_eq!(isoywd_opt(2004, 54, Weekday::Mon), None); + + assert_eq!(isoywd_opt(2011, 0, Weekday::Sun), None); + assert_eq!(isoywd_opt(2011, 1, Weekday::Mon), Some(ymd(2011, 1, 3))); + assert_eq!(isoywd_opt(2011, 1, Weekday::Sun), Some(ymd(2011, 1, 9))); + assert_eq!(isoywd_opt(2011, 2, Weekday::Mon), Some(ymd(2011, 1, 10))); + assert_eq!(isoywd_opt(2011, 2, Weekday::Sun), Some(ymd(2011, 1, 16))); + + assert_eq!(isoywd_opt(2018, 51, Weekday::Mon), Some(ymd(2018, 12, 17))); + assert_eq!(isoywd_opt(2018, 51, Weekday::Sun), Some(ymd(2018, 12, 23))); + assert_eq!(isoywd_opt(2018, 52, Weekday::Mon), Some(ymd(2018, 12, 24))); + assert_eq!(isoywd_opt(2018, 52, Weekday::Sun), Some(ymd(2018, 12, 30))); + assert_eq!(isoywd_opt(2018, 53, Weekday::Mon), None); + } + + #[test] + fn test_date_from_isoymd_and_isoweekdate() { + for year in 2000..2401 { + for week in 1..54 { + for &weekday in [Weekday::Mon, Weekday::Tue, Weekday::Wed, Weekday::Thu, + Weekday::Fri, Weekday::Sat, Weekday::Sun].iter() { + let d = NaiveDate::from_isoywd_opt(year, week, weekday); + if d.is_some() { + let d = d.unwrap(); + assert_eq!(d.weekday(), weekday); + let (year_, week_, weekday_) = d.isoweekdate(); + assert_eq!(year_, year); + assert_eq!(week_, week); + assert_eq!(weekday_, weekday); + } + } + } + } + + for year in 2000..2401 { + for month in 1..13 { + for day in 1..32 { + let d = NaiveDate::from_ymd_opt(year, month, day); + if d.is_some() { + let d = d.unwrap(); + let (year_, week_, weekday_) = d.isoweekdate(); + let d_ = NaiveDate::from_isoywd(year_, week_, weekday_); + assert_eq!(d, d_); + } + } + } + } + } + + #[test] + fn 
test_date_from_num_days_from_ce() { + let from_ndays_from_ce = |days| NaiveDate::from_num_days_from_ce_opt(days); + assert_eq!(from_ndays_from_ce(1), Some(NaiveDate::from_ymd(1, 1, 1))); + assert_eq!(from_ndays_from_ce(2), Some(NaiveDate::from_ymd(1, 1, 2))); + assert_eq!(from_ndays_from_ce(31), Some(NaiveDate::from_ymd(1, 1, 31))); + assert_eq!(from_ndays_from_ce(32), Some(NaiveDate::from_ymd(1, 2, 1))); + assert_eq!(from_ndays_from_ce(59), Some(NaiveDate::from_ymd(1, 2, 28))); + assert_eq!(from_ndays_from_ce(60), Some(NaiveDate::from_ymd(1, 3, 1))); + assert_eq!(from_ndays_from_ce(365), Some(NaiveDate::from_ymd(1, 12, 31))); + assert_eq!(from_ndays_from_ce(365*1 + 1), Some(NaiveDate::from_ymd(2, 1, 1))); + assert_eq!(from_ndays_from_ce(365*2 + 1), Some(NaiveDate::from_ymd(3, 1, 1))); + assert_eq!(from_ndays_from_ce(365*3 + 1), Some(NaiveDate::from_ymd(4, 1, 1))); + assert_eq!(from_ndays_from_ce(365*4 + 2), Some(NaiveDate::from_ymd(5, 1, 1))); + assert_eq!(from_ndays_from_ce(146097 + 1), Some(NaiveDate::from_ymd(401, 1, 1))); + assert_eq!(from_ndays_from_ce(146097*5 + 1), Some(NaiveDate::from_ymd(2001, 1, 1))); + assert_eq!(from_ndays_from_ce(719163), Some(NaiveDate::from_ymd(1970, 1, 1))); + assert_eq!(from_ndays_from_ce(0), Some(NaiveDate::from_ymd(0, 12, 31))); // 1 BCE + assert_eq!(from_ndays_from_ce(-365), Some(NaiveDate::from_ymd(0, 1, 1))); + assert_eq!(from_ndays_from_ce(-366), Some(NaiveDate::from_ymd(-1, 12, 31))); // 2 BCE + + for days in (-9999..10001).map(|x| x * 100) { + assert_eq!(from_ndays_from_ce(days).map(|d| d.num_days_from_ce()), Some(days)); + } + + assert_eq!(from_ndays_from_ce(MIN.num_days_from_ce()), Some(MIN)); + assert_eq!(from_ndays_from_ce(MIN.num_days_from_ce() - 1), None); + assert_eq!(from_ndays_from_ce(MAX.num_days_from_ce()), Some(MAX)); + assert_eq!(from_ndays_from_ce(MAX.num_days_from_ce() + 1), None); + } + + #[test] + fn test_date_fields() { + fn check(year: i32, month: u32, day: u32, ordinal: u32) { + let d1 = 
NaiveDate::from_ymd(year, month, day); + assert_eq!(d1.year(), year); + assert_eq!(d1.month(), month); + assert_eq!(d1.day(), day); + assert_eq!(d1.ordinal(), ordinal); + + let d2 = NaiveDate::from_yo(year, ordinal); + assert_eq!(d2.year(), year); + assert_eq!(d2.month(), month); + assert_eq!(d2.day(), day); + assert_eq!(d2.ordinal(), ordinal); + + assert_eq!(d1, d2); + } + + check(2012, 1, 1, 1); + check(2012, 1, 2, 2); + check(2012, 2, 1, 32); + check(2012, 2, 29, 60); + check(2012, 3, 1, 61); + check(2012, 4, 9, 100); + check(2012, 7, 18, 200); + check(2012, 10, 26, 300); + check(2012, 12, 31, 366); + + check(2014, 1, 1, 1); + check(2014, 1, 2, 2); + check(2014, 2, 1, 32); + check(2014, 2, 28, 59); + check(2014, 3, 1, 60); + check(2014, 4, 10, 100); + check(2014, 7, 19, 200); + check(2014, 10, 27, 300); + check(2014, 12, 31, 365); + } + + #[test] + fn test_date_weekday() { + assert_eq!(NaiveDate::from_ymd(1582, 10, 15).weekday(), Weekday::Fri); + // May 20, 1875 = ISO 8601 reference date + assert_eq!(NaiveDate::from_ymd(1875, 5, 20).weekday(), Weekday::Thu); + assert_eq!(NaiveDate::from_ymd(2000, 1, 1).weekday(), Weekday::Sat); + } + + #[test] + fn test_date_with_fields() { + let d = NaiveDate::from_ymd(2000, 2, 29); + assert_eq!(d.with_year(-400), Some(NaiveDate::from_ymd(-400, 2, 29))); + assert_eq!(d.with_year(-100), None); + assert_eq!(d.with_year(1600), Some(NaiveDate::from_ymd(1600, 2, 29))); + assert_eq!(d.with_year(1900), None); + assert_eq!(d.with_year(2000), Some(NaiveDate::from_ymd(2000, 2, 29))); + assert_eq!(d.with_year(2001), None); + assert_eq!(d.with_year(2004), Some(NaiveDate::from_ymd(2004, 2, 29))); + assert_eq!(d.with_year(i32::MAX), None); + + let d = NaiveDate::from_ymd(2000, 4, 30); + assert_eq!(d.with_month(0), None); + assert_eq!(d.with_month(1), Some(NaiveDate::from_ymd(2000, 1, 30))); + assert_eq!(d.with_month(2), None); + assert_eq!(d.with_month(3), Some(NaiveDate::from_ymd(2000, 3, 30))); + assert_eq!(d.with_month(4), 
Some(NaiveDate::from_ymd(2000, 4, 30))); + assert_eq!(d.with_month(12), Some(NaiveDate::from_ymd(2000, 12, 30))); + assert_eq!(d.with_month(13), None); + assert_eq!(d.with_month(u32::MAX), None); + + let d = NaiveDate::from_ymd(2000, 2, 8); + assert_eq!(d.with_day(0), None); + assert_eq!(d.with_day(1), Some(NaiveDate::from_ymd(2000, 2, 1))); + assert_eq!(d.with_day(29), Some(NaiveDate::from_ymd(2000, 2, 29))); + assert_eq!(d.with_day(30), None); + assert_eq!(d.with_day(u32::MAX), None); + + let d = NaiveDate::from_ymd(2000, 5, 5); + assert_eq!(d.with_ordinal(0), None); + assert_eq!(d.with_ordinal(1), Some(NaiveDate::from_ymd(2000, 1, 1))); + assert_eq!(d.with_ordinal(60), Some(NaiveDate::from_ymd(2000, 2, 29))); + assert_eq!(d.with_ordinal(61), Some(NaiveDate::from_ymd(2000, 3, 1))); + assert_eq!(d.with_ordinal(366), Some(NaiveDate::from_ymd(2000, 12, 31))); + assert_eq!(d.with_ordinal(367), None); + assert_eq!(d.with_ordinal(u32::MAX), None); + } + + #[test] + fn test_date_num_days_from_ce() { + assert_eq!(NaiveDate::from_ymd(1, 1, 1).num_days_from_ce(), 1); + + for year in -9999..10001 { + assert_eq!(NaiveDate::from_ymd(year, 1, 1).num_days_from_ce(), + NaiveDate::from_ymd(year - 1, 12, 31).num_days_from_ce() + 1); + } + } + + #[test] + fn test_date_succ() { + let ymd = |y,m,d| NaiveDate::from_ymd(y, m, d); + assert_eq!(ymd(2014, 5, 6).succ_opt(), Some(ymd(2014, 5, 7))); + assert_eq!(ymd(2014, 5, 31).succ_opt(), Some(ymd(2014, 6, 1))); + assert_eq!(ymd(2014, 12, 31).succ_opt(), Some(ymd(2015, 1, 1))); + assert_eq!(ymd(2016, 2, 28).succ_opt(), Some(ymd(2016, 2, 29))); + assert_eq!(ymd(MAX.year(), 12, 31).succ_opt(), None); + } + + #[test] + fn test_date_pred() { + let ymd = |y,m,d| NaiveDate::from_ymd(y, m, d); + assert_eq!(ymd(2016, 3, 1).pred_opt(), Some(ymd(2016, 2, 29))); + assert_eq!(ymd(2015, 1, 1).pred_opt(), Some(ymd(2014, 12, 31))); + assert_eq!(ymd(2014, 6, 1).pred_opt(), Some(ymd(2014, 5, 31))); + assert_eq!(ymd(2014, 5, 7).pred_opt(), Some(ymd(2014, 5, 
6))); + assert_eq!(ymd(MIN.year(), 1, 1).pred_opt(), None); + } + + #[test] + fn test_date_add() { + fn check((y1,m1,d1): (i32, u32, u32), rhs: Duration, ymd: Option<(i32, u32, u32)>) { + let lhs = NaiveDate::from_ymd(y1, m1, d1); + let sum = ymd.map(|(y,m,d)| NaiveDate::from_ymd(y, m, d)); + assert_eq!(lhs.checked_add(rhs), sum); + assert_eq!(lhs.checked_sub(-rhs), sum); + } + + check((2014, 1, 1), Duration::zero(), Some((2014, 1, 1))); + check((2014, 1, 1), Duration::seconds(86399), Some((2014, 1, 1))); + // always round towards zero + check((2014, 1, 1), Duration::seconds(-86399), Some((2014, 1, 1))); + check((2014, 1, 1), Duration::days(1), Some((2014, 1, 2))); + check((2014, 1, 1), Duration::days(-1), Some((2013, 12, 31))); + check((2014, 1, 1), Duration::days(364), Some((2014, 12, 31))); + check((2014, 1, 1), Duration::days(365*4 + 1), Some((2018, 1, 1))); + check((2014, 1, 1), Duration::days(365*400 + 97), Some((2414, 1, 1))); + + check((-7, 1, 1), Duration::days(365*12 + 3), Some((5, 1, 1))); + + // overflow check + check((0, 1, 1), Duration::days(MAX_DAYS_FROM_YEAR_0 as i64), Some((MAX_YEAR, 12, 31))); + check((0, 1, 1), Duration::days(MAX_DAYS_FROM_YEAR_0 as i64 + 1), None); + check((0, 1, 1), Duration::max_value(), None); + check((0, 1, 1), Duration::days(MIN_DAYS_FROM_YEAR_0 as i64), Some((MIN_YEAR, 1, 1))); + check((0, 1, 1), Duration::days(MIN_DAYS_FROM_YEAR_0 as i64 - 1), None); + check((0, 1, 1), Duration::min_value(), None); + } + + #[test] + fn test_date_sub() { + fn check((y1,m1,d1): (i32, u32, u32), (y2,m2,d2): (i32, u32, u32), diff: Duration) { + let lhs = NaiveDate::from_ymd(y1, m1, d1); + let rhs = NaiveDate::from_ymd(y2, m2, d2); + assert_eq!(lhs - rhs, diff); + assert_eq!(rhs - lhs, -diff); + } + + check((2014, 1, 1), (2014, 1, 1), Duration::zero()); + check((2014, 1, 2), (2014, 1, 1), Duration::days(1)); + check((2014, 12, 31), (2014, 1, 1), Duration::days(364)); + check((2015, 1, 3), (2014, 1, 1), Duration::days(365 + 2)); + check((2018, 
1, 1), (2014, 1, 1), Duration::days(365*4 + 1)); + check((2414, 1, 1), (2014, 1, 1), Duration::days(365*400 + 97)); + + check((MAX_YEAR, 12, 31), (0, 1, 1), Duration::days(MAX_DAYS_FROM_YEAR_0 as i64)); + check((MIN_YEAR, 1, 1), (0, 1, 1), Duration::days(MIN_DAYS_FROM_YEAR_0 as i64)); + } + + #[test] + fn test_date_fmt() { + assert_eq!(format!("{:?}", NaiveDate::from_ymd(2012, 3, 4)), "2012-03-04"); + assert_eq!(format!("{:?}", NaiveDate::from_ymd(0, 3, 4)), "0000-03-04"); + assert_eq!(format!("{:?}", NaiveDate::from_ymd(-307, 3, 4)), "-0307-03-04"); + assert_eq!(format!("{:?}", NaiveDate::from_ymd(12345, 3, 4)), "+12345-03-04"); + + assert_eq!(NaiveDate::from_ymd(2012, 3, 4).to_string(), "2012-03-04"); + assert_eq!(NaiveDate::from_ymd(0, 3, 4).to_string(), "0000-03-04"); + assert_eq!(NaiveDate::from_ymd(-307, 3, 4).to_string(), "-0307-03-04"); + assert_eq!(NaiveDate::from_ymd(12345, 3, 4).to_string(), "+12345-03-04"); + + // the format specifier should have no effect on `NaiveTime` + assert_eq!(format!("{:+30?}", NaiveDate::from_ymd(1234, 5, 6)), "1234-05-06"); + assert_eq!(format!("{:30?}", NaiveDate::from_ymd(12345, 6, 7)), "+12345-06-07"); + } + + #[test] + fn test_date_from_str() { + // valid cases + let valid = [ + "-0000000123456-1-2", + " -123456 - 1 - 2 ", + "-12345-1-2", + "-1234-12-31", + "-7-6-5", + "350-2-28", + "360-02-29", + "0360-02-29", + "2015-2 -18", + "+70-2-18", + "+70000-2-18", + "+00007-2-18", + ]; + for &s in &valid { + let d = match s.parse::() { + Ok(d) => d, + Err(e) => panic!("parsing `{}` has failed: {}", s, e) + }; + let s_ = format!("{:?}", d); + // `s` and `s_` may differ, but `s.parse()` and `s_.parse()` must be same + let d_ = match s_.parse::() { + Ok(d) => d, + Err(e) => panic!("`{}` is parsed into `{:?}`, but reparsing that has failed: {}", + s, d, e) + }; + assert!(d == d_, "`{}` is parsed into `{:?}`, but reparsed result \ + `{:?}` does not match", s, d, d_); + } + + // some invalid cases + // since `ParseErrorKind` is 
private, all we can do is to check if there was an error + assert!("".parse::().is_err()); + assert!("x".parse::().is_err()); + assert!("2014".parse::().is_err()); + assert!("2014-01".parse::().is_err()); + assert!("2014-01-00".parse::().is_err()); + assert!("2014-13-57".parse::().is_err()); + assert!("9999999-9-9".parse::().is_err()); // out-of-bounds + } + + #[test] + fn test_date_parse_from_str() { + let ymd = |y,m,d| NaiveDate::from_ymd(y,m,d); + assert_eq!(NaiveDate::parse_from_str("2014-5-7T12:34:56+09:30", "%Y-%m-%dT%H:%M:%S%z"), + Ok(ymd(2014, 5, 7))); // ignore time and offset + assert_eq!(NaiveDate::parse_from_str("2015-W06-1=2015-033", "%G-W%V-%u = %Y-%j"), + Ok(ymd(2015, 2, 2))); + assert_eq!(NaiveDate::parse_from_str("Fri, 09 Aug 13", "%a, %d %b %y"), + Ok(ymd(2013, 8, 9))); + assert!(NaiveDate::parse_from_str("Sat, 09 Aug 2013", "%a, %d %b %Y").is_err()); + assert!(NaiveDate::parse_from_str("2014-57", "%Y-%m-%d").is_err()); + assert!(NaiveDate::parse_from_str("2014", "%Y").is_err()); // insufficient + } + + #[test] + fn test_date_format() { + let d = NaiveDate::from_ymd(2012, 3, 4); + assert_eq!(d.format("%Y,%C,%y,%G,%g").to_string(), "2012,20,12,2012,12"); + assert_eq!(d.format("%m,%b,%h,%B").to_string(), "03,Mar,Mar,March"); + assert_eq!(d.format("%d,%e").to_string(), "04, 4"); + assert_eq!(d.format("%U,%W,%V").to_string(), "10,09,09"); + assert_eq!(d.format("%a,%A,%w,%u").to_string(), "Sun,Sunday,0,7"); + assert_eq!(d.format("%j").to_string(), "064"); // since 2012 is a leap year + assert_eq!(d.format("%D,%x").to_string(), "03/04/12,03/04/12"); + assert_eq!(d.format("%F").to_string(), "2012-03-04"); + assert_eq!(d.format("%v").to_string(), " 4-Mar-2012"); + assert_eq!(d.format("%t%n%%%n%t").to_string(), "\t\n%\n\t"); + + // non-four-digit years + assert_eq!(NaiveDate::from_ymd(12345, 1, 1).format("%Y").to_string(), "+12345"); + assert_eq!(NaiveDate::from_ymd(1234, 1, 1).format("%Y").to_string(), "1234"); + assert_eq!(NaiveDate::from_ymd(123, 1, 
1).format("%Y").to_string(), "0123"); + assert_eq!(NaiveDate::from_ymd(12, 1, 1).format("%Y").to_string(), "0012"); + assert_eq!(NaiveDate::from_ymd(1, 1, 1).format("%Y").to_string(), "0001"); + assert_eq!(NaiveDate::from_ymd(0, 1, 1).format("%Y").to_string(), "0000"); + assert_eq!(NaiveDate::from_ymd(-1, 1, 1).format("%Y").to_string(), "-0001"); + assert_eq!(NaiveDate::from_ymd(-12, 1, 1).format("%Y").to_string(), "-0012"); + assert_eq!(NaiveDate::from_ymd(-123, 1, 1).format("%Y").to_string(), "-0123"); + assert_eq!(NaiveDate::from_ymd(-1234, 1, 1).format("%Y").to_string(), "-1234"); + assert_eq!(NaiveDate::from_ymd(-12345, 1, 1).format("%Y").to_string(), "-12345"); + + // corner cases + assert_eq!(NaiveDate::from_ymd(2007, 12, 31).format("%G,%g,%U,%W,%V").to_string(), + "2008,08,53,53,01"); + assert_eq!(NaiveDate::from_ymd(2010, 1, 3).format("%G,%g,%U,%W,%V").to_string(), + "2009,09,01,00,53"); + } +} + +/// The internal implementation of the calendar and ordinal date. +/// +/// The current implementation is optimized for determining year, month, day and day of week. +/// 4-bit `YearFlags` map to one of 14 possible classes of year in the Gregorian calendar, +/// which are included in every packed `NaiveDate` instance. +/// The conversion between the packed calendar date (`Mdf`) and the ordinal date (`Of`) is +/// based on the moderately-sized lookup table (~1.5KB) +/// and the packed representation is chosen for the efficient lookup. +/// Every internal data structure does not validate its input, +/// but the conversion keeps the valid value valid and the invalid value invalid +/// so that the user-facing `NaiveDate` can validate the input as late as possible. +#[allow(dead_code)] // some internal methods have been left for consistency +mod internals { + use std::{i32, fmt}; + use num::traits::FromPrimitive; + use Weekday; + use div::{div_rem, mod_floor}; + + /// The internal date representation. This also includes the packed `Mdf` value. 
+ pub type DateImpl = i32; + + pub const MAX_YEAR: DateImpl = i32::MAX >> 13; + pub const MIN_YEAR: DateImpl = i32::MIN >> 13; + + /// The year flags (aka the dominical letter). + /// + /// There are 14 possible classes of year in the Gregorian calendar: + /// common and leap years starting with Monday through Sunday. + /// The `YearFlags` stores this information into 4 bits `abbb`, + /// where `a` is `1` for the common year (simplifies the `Of` validation) + /// and `bbb` is a non-zero `Weekday` (mapping `Mon` to 7) of the last day in the past year + /// (simplifies the day of week calculation from the 1-based ordinal). + #[derive(PartialEq, Eq, Copy, Clone)] + pub struct YearFlags(pub u8); + + pub const A: YearFlags = YearFlags(0o15); pub const AG: YearFlags = YearFlags(0o05); + pub const B: YearFlags = YearFlags(0o14); pub const BA: YearFlags = YearFlags(0o04); + pub const C: YearFlags = YearFlags(0o13); pub const CB: YearFlags = YearFlags(0o03); + pub const D: YearFlags = YearFlags(0o12); pub const DC: YearFlags = YearFlags(0o02); + pub const E: YearFlags = YearFlags(0o11); pub const ED: YearFlags = YearFlags(0o01); + pub const F: YearFlags = YearFlags(0o17); pub const FE: YearFlags = YearFlags(0o07); + pub const G: YearFlags = YearFlags(0o16); pub const GF: YearFlags = YearFlags(0o06); + + static YEAR_TO_FLAGS: [YearFlags; 400] = [ + BA, G, F, E, DC, B, A, G, FE, D, C, B, AG, F, E, D, CB, A, G, F, + ED, C, B, A, GF, E, D, C, BA, G, F, E, DC, B, A, G, FE, D, C, B, + AG, F, E, D, CB, A, G, F, ED, C, B, A, GF, E, D, C, BA, G, F, E, + DC, B, A, G, FE, D, C, B, AG, F, E, D, CB, A, G, F, ED, C, B, A, + GF, E, D, C, BA, G, F, E, DC, B, A, G, FE, D, C, B, AG, F, E, D, // 100 + C, B, A, G, FE, D, C, B, AG, F, E, D, CB, A, G, F, ED, C, B, A, + GF, E, D, C, BA, G, F, E, DC, B, A, G, FE, D, C, B, AG, F, E, D, + CB, A, G, F, ED, C, B, A, GF, E, D, C, BA, G, F, E, DC, B, A, G, + FE, D, C, B, AG, F, E, D, CB, A, G, F, ED, C, B, A, GF, E, D, C, + BA, G, F, E, DC, B, A, G, FE, 
D, C, B, AG, F, E, D, CB, A, G, F, // 200 + E, D, C, B, AG, F, E, D, CB, A, G, F, ED, C, B, A, GF, E, D, C, + BA, G, F, E, DC, B, A, G, FE, D, C, B, AG, F, E, D, CB, A, G, F, + ED, C, B, A, GF, E, D, C, BA, G, F, E, DC, B, A, G, FE, D, C, B, + AG, F, E, D, CB, A, G, F, ED, C, B, A, GF, E, D, C, BA, G, F, E, + DC, B, A, G, FE, D, C, B, AG, F, E, D, CB, A, G, F, ED, C, B, A, // 300 + G, F, E, D, CB, A, G, F, ED, C, B, A, GF, E, D, C, BA, G, F, E, + DC, B, A, G, FE, D, C, B, AG, F, E, D, CB, A, G, F, ED, C, B, A, + GF, E, D, C, BA, G, F, E, DC, B, A, G, FE, D, C, B, AG, F, E, D, + CB, A, G, F, ED, C, B, A, GF, E, D, C, BA, G, F, E, DC, B, A, G, + FE, D, C, B, AG, F, E, D, CB, A, G, F, ED, C, B, A, GF, E, D, C, // 400 + ]; + + static YEAR_DELTAS: [u8; 401] = [ + 0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, + 5, 6, 6, 6, 6, 7, 7, 7, 7, 8, 8, 8, 8, 9, 9, 9, 9, 10, 10, 10, + 10, 11, 11, 11, 11, 12, 12, 12, 12, 13, 13, 13, 13, 14, 14, 14, 14, 15, 15, 15, + 15, 16, 16, 16, 16, 17, 17, 17, 17, 18, 18, 18, 18, 19, 19, 19, 19, 20, 20, 20, + 20, 21, 21, 21, 21, 22, 22, 22, 22, 23, 23, 23, 23, 24, 24, 24, 24, 25, 25, 25, // 100 + 25, 25, 25, 25, 25, 26, 26, 26, 26, 27, 27, 27, 27, 28, 28, 28, 28, 29, 29, 29, + 29, 30, 30, 30, 30, 31, 31, 31, 31, 32, 32, 32, 32, 33, 33, 33, 33, 34, 34, 34, + 34, 35, 35, 35, 35, 36, 36, 36, 36, 37, 37, 37, 37, 38, 38, 38, 38, 39, 39, 39, + 39, 40, 40, 40, 40, 41, 41, 41, 41, 42, 42, 42, 42, 43, 43, 43, 43, 44, 44, 44, + 44, 45, 45, 45, 45, 46, 46, 46, 46, 47, 47, 47, 47, 48, 48, 48, 48, 49, 49, 49, // 200 + 49, 49, 49, 49, 49, 50, 50, 50, 50, 51, 51, 51, 51, 52, 52, 52, 52, 53, 53, 53, + 53, 54, 54, 54, 54, 55, 55, 55, 55, 56, 56, 56, 56, 57, 57, 57, 57, 58, 58, 58, + 58, 59, 59, 59, 59, 60, 60, 60, 60, 61, 61, 61, 61, 62, 62, 62, 62, 63, 63, 63, + 63, 64, 64, 64, 64, 65, 65, 65, 65, 66, 66, 66, 66, 67, 67, 67, 67, 68, 68, 68, + 68, 69, 69, 69, 69, 70, 70, 70, 70, 71, 71, 71, 71, 72, 72, 72, 72, 73, 73, 73, // 300 + 73, 73, 73, 73, 
73, 74, 74, 74, 74, 75, 75, 75, 75, 76, 76, 76, 76, 77, 77, 77, + 77, 78, 78, 78, 78, 79, 79, 79, 79, 80, 80, 80, 80, 81, 81, 81, 81, 82, 82, 82, + 82, 83, 83, 83, 83, 84, 84, 84, 84, 85, 85, 85, 85, 86, 86, 86, 86, 87, 87, 87, + 87, 88, 88, 88, 88, 89, 89, 89, 89, 90, 90, 90, 90, 91, 91, 91, 91, 92, 92, 92, + 92, 93, 93, 93, 93, 94, 94, 94, 94, 95, 95, 95, 95, 96, 96, 96, 96, 97, 97, 97, 97 // 400+1 + ]; + + pub fn cycle_to_yo(cycle: u32) -> (u32, u32) { + let (mut year_mod_400, mut ordinal0) = div_rem(cycle, 365); + let delta = YEAR_DELTAS[year_mod_400 as usize] as u32; + if ordinal0 < delta { + year_mod_400 -= 1; + ordinal0 += 365 - YEAR_DELTAS[year_mod_400 as usize] as u32; + } else { + ordinal0 -= delta; + } + (year_mod_400, ordinal0 + 1) + } + + pub fn yo_to_cycle(year_mod_400: u32, ordinal: u32) -> u32 { + year_mod_400 * 365 + YEAR_DELTAS[year_mod_400 as usize] as u32 + ordinal - 1 + } + + impl YearFlags { + #[inline] + pub fn from_year(year: i32) -> YearFlags { + let year = mod_floor(year, 400); + YearFlags::from_year_mod_400(year) + } + + #[inline] + pub fn from_year_mod_400(year: i32) -> YearFlags { + YEAR_TO_FLAGS[year as usize] + } + + #[inline] + pub fn ndays(&self) -> u32 { + let YearFlags(flags) = *self; + 366 - (flags >> 3) as u32 + } + + #[inline] + pub fn isoweek_delta(&self) -> u32 { + let YearFlags(flags) = *self; + let mut delta = flags as u32 & 0b111; + if delta < 3 { delta += 7; } + delta + } + + #[inline] + pub fn nisoweeks(&self) -> u32 { + let YearFlags(flags) = *self; + 52 + ((0b00000100_00000110 >> flags as usize) & 1) + } + } + + impl fmt::Debug for YearFlags { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let YearFlags(flags) = *self; + match flags { + 0o15 => "A".fmt(f), 0o05 => "AG".fmt(f), + 0o14 => "B".fmt(f), 0o04 => "BA".fmt(f), + 0o13 => "C".fmt(f), 0o03 => "CB".fmt(f), + 0o12 => "D".fmt(f), 0o02 => "DC".fmt(f), + 0o11 => "E".fmt(f), 0o01 => "ED".fmt(f), + 0o10 => "F?".fmt(f), 0o00 => "FE?".fmt(f), // non-canonical 
+ 0o17 => "F".fmt(f), 0o07 => "FE".fmt(f), + 0o16 => "G".fmt(f), 0o06 => "GF".fmt(f), + _ => write!(f, "YearFlags({})", flags), + } + } + } + + pub const MIN_OL: u32 = 1 << 1; + pub const MAX_OL: u32 = 366 << 1; // larger than the non-leap last day `(365 << 1) | 1` + pub const MIN_MDL: u32 = (1 << 6) | (1 << 1); + pub const MAX_MDL: u32 = (12 << 6) | (31 << 1) | 1; + + const XX: i8 = -128; + static MDL_TO_OL: [i8; (MAX_MDL as usize + 1)] = [ + XX, XX, XX, XX, XX, XX, XX, XX, XX, XX, XX, XX, XX, XX, XX, XX, + XX, XX, XX, XX, XX, XX, XX, XX, XX, XX, XX, XX, XX, XX, XX, XX, + XX, XX, XX, XX, XX, XX, XX, XX, XX, XX, XX, XX, XX, XX, XX, XX, + XX, XX, XX, XX, XX, XX, XX, XX, XX, XX, XX, XX, XX, XX, XX, XX, // 0 + XX, XX, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, + 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, + 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, + 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, // 1 + XX, XX, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, + 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, + 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, + 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, XX, XX, XX, XX, XX, // 2 + XX, XX, 72, 74, 72, 74, 72, 74, 72, 74, 72, 74, 72, 74, 72, 74, + 72, 74, 72, 74, 72, 74, 72, 74, 72, 74, 72, 74, 72, 74, 72, 74, + 72, 74, 72, 74, 72, 74, 72, 74, 72, 74, 72, 74, 72, 74, 72, 74, + 72, 74, 72, 74, 72, 74, 72, 74, 72, 74, 72, 74, 72, 74, 72, 74, // 3 + XX, XX, 74, 76, 74, 76, 74, 76, 74, 76, 74, 76, 74, 76, 74, 76, + 74, 76, 74, 76, 74, 76, 74, 76, 74, 76, 74, 76, 74, 76, 74, 76, + 74, 76, 74, 76, 74, 76, 74, 76, 74, 76, 74, 76, 74, 76, 74, 76, + 74, 76, 74, 76, 74, 76, 74, 76, 74, 76, 74, 76, 74, 76, XX, XX, // 4 + XX, XX, 78, 80, 78, 80, 78, 80, 78, 80, 78, 80, 78, 80, 78, 80, + 78, 80, 78, 80, 78, 80, 78, 80, 78, 80, 78, 80, 78, 80, 78, 80, + 78, 80, 78, 80, 78, 80, 78, 80, 78, 80, 78, 80, 78, 80, 78, 80, + 78, 80, 
78, 80, 78, 80, 78, 80, 78, 80, 78, 80, 78, 80, 78, 80, // 5 + XX, XX, 80, 82, 80, 82, 80, 82, 80, 82, 80, 82, 80, 82, 80, 82, + 80, 82, 80, 82, 80, 82, 80, 82, 80, 82, 80, 82, 80, 82, 80, 82, + 80, 82, 80, 82, 80, 82, 80, 82, 80, 82, 80, 82, 80, 82, 80, 82, + 80, 82, 80, 82, 80, 82, 80, 82, 80, 82, 80, 82, 80, 82, XX, XX, // 6 + XX, XX, 84, 86, 84, 86, 84, 86, 84, 86, 84, 86, 84, 86, 84, 86, + 84, 86, 84, 86, 84, 86, 84, 86, 84, 86, 84, 86, 84, 86, 84, 86, + 84, 86, 84, 86, 84, 86, 84, 86, 84, 86, 84, 86, 84, 86, 84, 86, + 84, 86, 84, 86, 84, 86, 84, 86, 84, 86, 84, 86, 84, 86, 84, 86, // 7 + XX, XX, 86, 88, 86, 88, 86, 88, 86, 88, 86, 88, 86, 88, 86, 88, + 86, 88, 86, 88, 86, 88, 86, 88, 86, 88, 86, 88, 86, 88, 86, 88, + 86, 88, 86, 88, 86, 88, 86, 88, 86, 88, 86, 88, 86, 88, 86, 88, + 86, 88, 86, 88, 86, 88, 86, 88, 86, 88, 86, 88, 86, 88, 86, 88, // 8 + XX, XX, 88, 90, 88, 90, 88, 90, 88, 90, 88, 90, 88, 90, 88, 90, + 88, 90, 88, 90, 88, 90, 88, 90, 88, 90, 88, 90, 88, 90, 88, 90, + 88, 90, 88, 90, 88, 90, 88, 90, 88, 90, 88, 90, 88, 90, 88, 90, + 88, 90, 88, 90, 88, 90, 88, 90, 88, 90, 88, 90, 88, 90, XX, XX, // 9 + XX, XX, 92, 94, 92, 94, 92, 94, 92, 94, 92, 94, 92, 94, 92, 94, + 92, 94, 92, 94, 92, 94, 92, 94, 92, 94, 92, 94, 92, 94, 92, 94, + 92, 94, 92, 94, 92, 94, 92, 94, 92, 94, 92, 94, 92, 94, 92, 94, + 92, 94, 92, 94, 92, 94, 92, 94, 92, 94, 92, 94, 92, 94, 92, 94, // 10 + XX, XX, 94, 96, 94, 96, 94, 96, 94, 96, 94, 96, 94, 96, 94, 96, + 94, 96, 94, 96, 94, 96, 94, 96, 94, 96, 94, 96, 94, 96, 94, 96, + 94, 96, 94, 96, 94, 96, 94, 96, 94, 96, 94, 96, 94, 96, 94, 96, + 94, 96, 94, 96, 94, 96, 94, 96, 94, 96, 94, 96, 94, 96, XX, XX, // 11 + XX, XX, 98,100, 98,100, 98,100, 98,100, 98,100, 98,100, 98,100, + 98,100, 98,100, 98,100, 98,100, 98,100, 98,100, 98,100, 98,100, + 98,100, 98,100, 98,100, 98,100, 98,100, 98,100, 98,100, 98,100, + 98,100, 98,100, 98,100, 98,100, 98,100, 98,100, 98,100, 98,100, // 12 + ]; + + static OL_TO_MDL: [u8; (MAX_OL as usize + 
1)] = [ + 0, 0, // 0 + 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, + 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, + 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, + 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, // 1 + 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, + 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, + 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, + 66, 66, 66, 66, 66, 66, 66, 66, 66, // 2 + 74, 72, 74, 72, 74, 72, 74, 72, 74, 72, 74, 72, 74, 72, 74, + 72, 74, 72, 74, 72, 74, 72, 74, 72, 74, 72, 74, 72, 74, 72, 74, + 72, 74, 72, 74, 72, 74, 72, 74, 72, 74, 72, 74, 72, 74, 72, 74, + 72, 74, 72, 74, 72, 74, 72, 74, 72, 74, 72, 74, 72, 74, 72, // 3 + 76, 74, 76, 74, 76, 74, 76, 74, 76, 74, 76, 74, 76, 74, 76, + 74, 76, 74, 76, 74, 76, 74, 76, 74, 76, 74, 76, 74, 76, 74, 76, + 74, 76, 74, 76, 74, 76, 74, 76, 74, 76, 74, 76, 74, 76, 74, 76, + 74, 76, 74, 76, 74, 76, 74, 76, 74, 76, 74, 76, 74, // 4 + 80, 78, 80, 78, 80, 78, 80, 78, 80, 78, 80, 78, 80, 78, 80, + 78, 80, 78, 80, 78, 80, 78, 80, 78, 80, 78, 80, 78, 80, 78, 80, + 78, 80, 78, 80, 78, 80, 78, 80, 78, 80, 78, 80, 78, 80, 78, 80, + 78, 80, 78, 80, 78, 80, 78, 80, 78, 80, 78, 80, 78, 80, 78, // 5 + 82, 80, 82, 80, 82, 80, 82, 80, 82, 80, 82, 80, 82, 80, 82, + 80, 82, 80, 82, 80, 82, 80, 82, 80, 82, 80, 82, 80, 82, 80, 82, + 80, 82, 80, 82, 80, 82, 80, 82, 80, 82, 80, 82, 80, 82, 80, 82, + 80, 82, 80, 82, 80, 82, 80, 82, 80, 82, 80, 82, 80, // 6 + 86, 84, 86, 84, 86, 84, 86, 84, 86, 84, 86, 84, 86, 84, 86, + 84, 86, 84, 86, 84, 86, 84, 86, 84, 86, 84, 86, 84, 86, 84, 86, + 84, 86, 84, 86, 84, 86, 84, 86, 84, 86, 84, 86, 84, 86, 84, 86, + 84, 86, 84, 86, 84, 86, 84, 86, 84, 86, 84, 86, 84, 86, 84, // 7 + 88, 86, 88, 86, 88, 86, 88, 86, 88, 86, 88, 86, 88, 86, 88, + 86, 88, 86, 88, 86, 88, 86, 88, 86, 88, 86, 88, 86, 88, 86, 88, + 86, 88, 86, 88, 86, 88, 86, 88, 86, 88, 86, 88, 86, 88, 
86, 88, + 86, 88, 86, 88, 86, 88, 86, 88, 86, 88, 86, 88, 86, 88, 86, // 8 + 90, 88, 90, 88, 90, 88, 90, 88, 90, 88, 90, 88, 90, 88, 90, + 88, 90, 88, 90, 88, 90, 88, 90, 88, 90, 88, 90, 88, 90, 88, 90, + 88, 90, 88, 90, 88, 90, 88, 90, 88, 90, 88, 90, 88, 90, 88, 90, + 88, 90, 88, 90, 88, 90, 88, 90, 88, 90, 88, 90, 88, // 9 + 94, 92, 94, 92, 94, 92, 94, 92, 94, 92, 94, 92, 94, 92, 94, + 92, 94, 92, 94, 92, 94, 92, 94, 92, 94, 92, 94, 92, 94, 92, 94, + 92, 94, 92, 94, 92, 94, 92, 94, 92, 94, 92, 94, 92, 94, 92, 94, + 92, 94, 92, 94, 92, 94, 92, 94, 92, 94, 92, 94, 92, 94, 92, // 10 + 96, 94, 96, 94, 96, 94, 96, 94, 96, 94, 96, 94, 96, 94, 96, + 94, 96, 94, 96, 94, 96, 94, 96, 94, 96, 94, 96, 94, 96, 94, 96, + 94, 96, 94, 96, 94, 96, 94, 96, 94, 96, 94, 96, 94, 96, 94, 96, + 94, 96, 94, 96, 94, 96, 94, 96, 94, 96, 94, 96, 94, // 11 + 100, 98,100, 98,100, 98,100, 98,100, 98,100, 98,100, 98,100, + 98,100, 98,100, 98,100, 98,100, 98,100, 98,100, 98,100, 98,100, + 98,100, 98,100, 98,100, 98,100, 98,100, 98,100, 98,100, 98,100, + 98,100, 98,100, 98,100, 98,100, 98,100, 98,100, 98,100, 98, // 12 + ]; + + /// Ordinal (day of year) and year flags: `(ordinal << 4) | flags`. + /// + /// The whole bits except for the least 3 bits are referred as `Ol` (ordinal and leap flag), + /// which is an index to the `OL_TO_MDL` lookup table. 
+ #[derive(PartialEq, PartialOrd, Copy, Clone)] + pub struct Of(pub u32); + + impl Of { + #[inline] + fn clamp_ordinal(ordinal: u32) -> u32 { + if ordinal > 366 {0} else {ordinal} + } + + #[inline] + pub fn new(ordinal: u32, YearFlags(flags): YearFlags) -> Of { + let ordinal = Of::clamp_ordinal(ordinal); + Of((ordinal << 4) | (flags as u32)) + } + + #[inline] + pub fn from_mdf(Mdf(mdf): Mdf) -> Of { + let mdl = mdf >> 3; + match MDL_TO_OL.get(mdl as usize) { + Some(&v) => Of(mdf.wrapping_sub((v as i32 as u32 & 0x3ff) << 3)), + None => Of(0) + } + } + + #[inline] + pub fn valid(&self) -> bool { + let Of(of) = *self; + let ol = of >> 3; + MIN_OL <= ol && ol <= MAX_OL + } + + #[inline] + pub fn ordinal(&self) -> u32 { + let Of(of) = *self; + of >> 4 + } + + #[inline] + pub fn with_ordinal(&self, ordinal: u32) -> Of { + let ordinal = Of::clamp_ordinal(ordinal); + let Of(of) = *self; + Of((of & 0b1111) | (ordinal << 4)) + } + + #[inline] + pub fn flags(&self) -> YearFlags { + let Of(of) = *self; + YearFlags((of & 0b1111) as u8) + } + + #[inline] + pub fn with_flags(&self, YearFlags(flags): YearFlags) -> Of { + let Of(of) = *self; + Of((of & !0b1111) | (flags as u32)) + } + + #[inline] + pub fn weekday(&self) -> Weekday { + let Of(of) = *self; + Weekday::from_u32(((of >> 4) + (of & 0b111)) % 7).unwrap() + } + + #[inline] + pub fn isoweekdate_raw(&self) -> (u32, Weekday) { + // week ordinal = ordinal + delta + let Of(of) = *self; + let weekord = (of >> 4).wrapping_add(self.flags().isoweek_delta()); + (weekord / 7, Weekday::from_u32(weekord % 7).unwrap()) + } + + #[inline] + pub fn to_mdf(&self) -> Mdf { + Mdf::from_of(*self) + } + + #[inline] + pub fn succ(&self) -> Of { + let Of(of) = *self; + Of(of + (1 << 4)) + } + + #[inline] + pub fn pred(&self) -> Of { + let Of(of) = *self; + Of(of - (1 << 4)) + } + } + + impl fmt::Debug for Of { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let Of(of) = *self; + write!(f, "Of(({} << 4) | {:#04o} /*{:?}*/)", + of >> 4, 
of & 0b1111, YearFlags((of & 0b1111) as u8)) + } + } + + /// Month, day of month and year flags: `(month << 9) | (day << 4) | flags` + /// + /// The whole bits except for the least 3 bits are referred as `Mdl` + /// (month, day of month and leap flag), + /// which is an index to the `MDL_TO_OL` lookup table. + #[derive(PartialEq, PartialOrd, Copy, Clone)] + pub struct Mdf(pub u32); + + impl Mdf { + #[inline] + fn clamp_month(month: u32) -> u32 { + if month > 12 {0} else {month} + } + + #[inline] + fn clamp_day(day: u32) -> u32 { + if day > 31 {0} else {day} + } + + #[inline] + pub fn new(month: u32, day: u32, YearFlags(flags): YearFlags) -> Mdf { + let month = Mdf::clamp_month(month); + let day = Mdf::clamp_day(day); + Mdf((month << 9) | (day << 4) | (flags as u32)) + } + + #[inline] + pub fn from_of(Of(of): Of) -> Mdf { + let ol = of >> 3; + match OL_TO_MDL.get(ol as usize) { + Some(&v) => Mdf(of + ((v as u32) << 3)), + None => Mdf(0) + } + } + + #[inline] + pub fn valid(&self) -> bool { + let Mdf(mdf) = *self; + let mdl = mdf >> 3; + match MDL_TO_OL.get(mdl as usize) { + Some(&v) => v >= 0, + None => false + } + } + + #[inline] + pub fn month(&self) -> u32 { + let Mdf(mdf) = *self; + mdf >> 9 + } + + #[inline] + pub fn with_month(&self, month: u32) -> Mdf { + let month = Mdf::clamp_month(month); + let Mdf(mdf) = *self; + Mdf((mdf & 0b11111_1111) | (month << 9)) + } + + #[inline] + pub fn day(&self) -> u32 { + let Mdf(mdf) = *self; + (mdf >> 4) & 0b11111 + } + + #[inline] + pub fn with_day(&self, day: u32) -> Mdf { + let day = Mdf::clamp_day(day); + let Mdf(mdf) = *self; + Mdf((mdf & !0b11111_0000) | (day << 4)) + } + + #[inline] + pub fn flags(&self) -> YearFlags { + let Mdf(mdf) = *self; + YearFlags((mdf & 0b1111) as u8) + } + + #[inline] + pub fn with_flags(&self, YearFlags(flags): YearFlags) -> Mdf { + let Mdf(mdf) = *self; + Mdf((mdf & !0b1111) | (flags as u32)) + } + + #[inline] + pub fn to_of(&self) -> Of { + Of::from_mdf(*self) + } + } + + impl fmt::Debug 
for Mdf { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let Mdf(mdf) = *self; + write!(f, "Mdf(({} << 9) | ({} << 4) | {:#04o} /*{:?}*/)", + mdf >> 9, (mdf >> 4) & 0b11111, mdf & 0b1111, YearFlags((mdf & 0b1111) as u8)) + } + } + + #[cfg(test)] + mod tests { + #[cfg(bench)] extern crate test; + + use Weekday; + use super::{Of, Mdf}; + use super::{YearFlags, A, B, C, D, E, F, G, AG, BA, CB, DC, ED, FE, GF}; + use num::iter::range_inclusive; + use std::u32; + + const NONLEAP_FLAGS: [YearFlags; 7] = [A, B, C, D, E, F, G]; + const LEAP_FLAGS: [YearFlags; 7] = [AG, BA, CB, DC, ED, FE, GF]; + const FLAGS: [YearFlags; 14] = [A, B, C, D, E, F, G, AG, BA, CB, DC, ED, FE, GF]; + + #[test] + fn test_year_flags_ndays_from_year() { + assert_eq!(YearFlags::from_year(2014).ndays(), 365); + assert_eq!(YearFlags::from_year(2012).ndays(), 366); + assert_eq!(YearFlags::from_year(2000).ndays(), 366); + assert_eq!(YearFlags::from_year(1900).ndays(), 365); + assert_eq!(YearFlags::from_year(1600).ndays(), 366); + assert_eq!(YearFlags::from_year( 1).ndays(), 365); + assert_eq!(YearFlags::from_year( 0).ndays(), 366); // 1 BCE (proleptic Gregorian) + assert_eq!(YearFlags::from_year( -1).ndays(), 365); // 2 BCE + assert_eq!(YearFlags::from_year( -4).ndays(), 366); // 5 BCE + assert_eq!(YearFlags::from_year( -99).ndays(), 365); // 100 BCE + assert_eq!(YearFlags::from_year(-100).ndays(), 365); // 101 BCE + assert_eq!(YearFlags::from_year(-399).ndays(), 365); // 400 BCE + assert_eq!(YearFlags::from_year(-400).ndays(), 366); // 401 BCE + } + + #[test] + fn test_year_flags_nisoweeks() { + assert_eq!(A.nisoweeks(), 52); + assert_eq!(B.nisoweeks(), 52); + assert_eq!(C.nisoweeks(), 52); + assert_eq!(D.nisoweeks(), 53); + assert_eq!(E.nisoweeks(), 52); + assert_eq!(F.nisoweeks(), 52); + assert_eq!(G.nisoweeks(), 52); + assert_eq!(AG.nisoweeks(), 52); + assert_eq!(BA.nisoweeks(), 52); + assert_eq!(CB.nisoweeks(), 52); + assert_eq!(DC.nisoweeks(), 53); + assert_eq!(ED.nisoweeks(), 53); + 
assert_eq!(FE.nisoweeks(), 52); + assert_eq!(GF.nisoweeks(), 52); + } + + #[cfg(bench)] + #[bench] + fn bench_year_flags_from_year(bh: &mut test::Bencher) { + bh.iter(|| { + for year in -999i32..1000 { + YearFlags::from_year(year); + } + }); + } + + #[test] + fn test_of() { + fn check(expected: bool, flags: YearFlags, ordinal1: u32, ordinal2: u32) { + for ordinal in range_inclusive(ordinal1, ordinal2) { + let of = Of::new(ordinal, flags); + assert!(of.valid() == expected, + "ordinal {} = {:?} should be {} for dominical year {:?}", + ordinal, of, if expected {"valid"} else {"invalid"}, flags); + } + } + + for &flags in NONLEAP_FLAGS.iter() { + check(false, flags, 0, 0); + check(true, flags, 1, 365); + check(false, flags, 366, 1024); + check(false, flags, u32::MAX, u32::MAX); + } + + for &flags in LEAP_FLAGS.iter() { + check(false, flags, 0, 0); + check(true, flags, 1, 366); + check(false, flags, 367, 1024); + check(false, flags, u32::MAX, u32::MAX); + } + } + + #[test] + fn test_mdf_valid() { + fn check(expected: bool, flags: YearFlags, month1: u32, day1: u32, + month2: u32, day2: u32) { + for month in range_inclusive(month1, month2) { + for day in range_inclusive(day1, day2) { + let mdf = Mdf::new(month, day, flags); + assert!(mdf.valid() == expected, + "month {} day {} = {:?} should be {} for dominical year {:?}", + month, day, mdf, if expected {"valid"} else {"invalid"}, flags); + } + } + } + + for &flags in NONLEAP_FLAGS.iter() { + check(false, flags, 0, 0, 0, 1024); + check(false, flags, 0, 0, 16, 0); + check(true, flags, 1, 1, 1, 31); check(false, flags, 1, 32, 1, 1024); + check(true, flags, 2, 1, 2, 28); check(false, flags, 2, 29, 2, 1024); + check(true, flags, 3, 1, 3, 31); check(false, flags, 3, 32, 3, 1024); + check(true, flags, 4, 1, 4, 30); check(false, flags, 4, 31, 4, 1024); + check(true, flags, 5, 1, 5, 31); check(false, flags, 5, 32, 5, 1024); + check(true, flags, 6, 1, 6, 30); check(false, flags, 6, 31, 6, 1024); + check(true, flags, 7, 1, 7, 31); 
check(false, flags, 7, 32, 7, 1024); + check(true, flags, 8, 1, 8, 31); check(false, flags, 8, 32, 8, 1024); + check(true, flags, 9, 1, 9, 30); check(false, flags, 9, 31, 9, 1024); + check(true, flags, 10, 1, 10, 31); check(false, flags, 10, 32, 10, 1024); + check(true, flags, 11, 1, 11, 30); check(false, flags, 11, 31, 11, 1024); + check(true, flags, 12, 1, 12, 31); check(false, flags, 12, 32, 12, 1024); + check(false, flags, 13, 0, 16, 1024); + check(false, flags, u32::MAX, 0, u32::MAX, 1024); + check(false, flags, 0, u32::MAX, 16, u32::MAX); + check(false, flags, u32::MAX, u32::MAX, u32::MAX, u32::MAX); + } + + for &flags in LEAP_FLAGS.iter() { + check(false, flags, 0, 0, 0, 1024); + check(false, flags, 0, 0, 16, 0); + check(true, flags, 1, 1, 1, 31); check(false, flags, 1, 32, 1, 1024); + check(true, flags, 2, 1, 2, 29); check(false, flags, 2, 30, 2, 1024); + check(true, flags, 3, 1, 3, 31); check(false, flags, 3, 32, 3, 1024); + check(true, flags, 4, 1, 4, 30); check(false, flags, 4, 31, 4, 1024); + check(true, flags, 5, 1, 5, 31); check(false, flags, 5, 32, 5, 1024); + check(true, flags, 6, 1, 6, 30); check(false, flags, 6, 31, 6, 1024); + check(true, flags, 7, 1, 7, 31); check(false, flags, 7, 32, 7, 1024); + check(true, flags, 8, 1, 8, 31); check(false, flags, 8, 32, 8, 1024); + check(true, flags, 9, 1, 9, 30); check(false, flags, 9, 31, 9, 1024); + check(true, flags, 10, 1, 10, 31); check(false, flags, 10, 32, 10, 1024); + check(true, flags, 11, 1, 11, 30); check(false, flags, 11, 31, 11, 1024); + check(true, flags, 12, 1, 12, 31); check(false, flags, 12, 32, 12, 1024); + check(false, flags, 13, 0, 16, 1024); + check(false, flags, u32::MAX, 0, u32::MAX, 1024); + check(false, flags, 0, u32::MAX, 16, u32::MAX); + check(false, flags, u32::MAX, u32::MAX, u32::MAX, u32::MAX); + } + } + + #[test] + fn test_of_fields() { + for &flags in FLAGS.iter() { + for ordinal in range_inclusive(1u32, 366) { + let of = Of::new(ordinal, flags); + if of.valid() { + 
assert_eq!(of.ordinal(), ordinal); + } + } + } + } + + #[test] + fn test_of_with_fields() { + fn check(flags: YearFlags, ordinal: u32) { + let of = Of::new(ordinal, flags); + + for ordinal in range_inclusive(0u32, 1024) { + let of = of.with_ordinal(ordinal); + assert_eq!(of.valid(), Of::new(ordinal, flags).valid()); + if of.valid() { + assert_eq!(of.ordinal(), ordinal); + } + } + } + + for &flags in NONLEAP_FLAGS.iter() { + check(flags, 1); + check(flags, 365); + } + for &flags in LEAP_FLAGS.iter() { + check(flags, 1); + check(flags, 366); + } + } + + #[test] + fn test_of_weekday() { + assert_eq!(Of::new(1, A).weekday(), Weekday::Sun); + assert_eq!(Of::new(1, B).weekday(), Weekday::Sat); + assert_eq!(Of::new(1, C).weekday(), Weekday::Fri); + assert_eq!(Of::new(1, D).weekday(), Weekday::Thu); + assert_eq!(Of::new(1, E).weekday(), Weekday::Wed); + assert_eq!(Of::new(1, F).weekday(), Weekday::Tue); + assert_eq!(Of::new(1, G).weekday(), Weekday::Mon); + assert_eq!(Of::new(1, AG).weekday(), Weekday::Sun); + assert_eq!(Of::new(1, BA).weekday(), Weekday::Sat); + assert_eq!(Of::new(1, CB).weekday(), Weekday::Fri); + assert_eq!(Of::new(1, DC).weekday(), Weekday::Thu); + assert_eq!(Of::new(1, ED).weekday(), Weekday::Wed); + assert_eq!(Of::new(1, FE).weekday(), Weekday::Tue); + assert_eq!(Of::new(1, GF).weekday(), Weekday::Mon); + + for &flags in FLAGS.iter() { + let mut prev = Of::new(1, flags).weekday(); + for ordinal in range_inclusive(2u32, flags.ndays()) { + let of = Of::new(ordinal, flags); + let expected = prev.succ(); + assert_eq!(of.weekday(), expected); + prev = expected; + } + } + } + + #[test] + fn test_mdf_fields() { + for &flags in FLAGS.iter() { + for month in range_inclusive(1u32, 12) { + for day in range_inclusive(1u32, 31) { + let mdf = Mdf::new(month, day, flags); + if mdf.valid() { + assert_eq!(mdf.month(), month); + assert_eq!(mdf.day(), day); + } + } + } + } + } + + #[test] + fn test_mdf_with_fields() { + fn check(flags: YearFlags, month: u32, day: u32) 
{ + let mdf = Mdf::new(month, day, flags); + + for month in range_inclusive(0u32, 16) { + let mdf = mdf.with_month(month); + assert_eq!(mdf.valid(), Mdf::new(month, day, flags).valid()); + if mdf.valid() { + assert_eq!(mdf.month(), month); + assert_eq!(mdf.day(), day); + } + } + + for day in range_inclusive(0u32, 1024) { + let mdf = mdf.with_day(day); + assert_eq!(mdf.valid(), Mdf::new(month, day, flags).valid()); + if mdf.valid() { + assert_eq!(mdf.month(), month); + assert_eq!(mdf.day(), day); + } + } + } + + for &flags in NONLEAP_FLAGS.iter() { + check(flags, 1, 1); + check(flags, 1, 31); + check(flags, 2, 1); + check(flags, 2, 28); + check(flags, 2, 29); + check(flags, 12, 31); + } + for &flags in LEAP_FLAGS.iter() { + check(flags, 1, 1); + check(flags, 1, 31); + check(flags, 2, 1); + check(flags, 2, 29); + check(flags, 2, 30); + check(flags, 12, 31); + } + } + + #[test] + fn test_of_isoweekdate_raw() { + for &flags in FLAGS.iter() { + // January 4 should be in the first week + let (week, _) = Of::new(4 /* January 4 */, flags).isoweekdate_raw(); + assert_eq!(week, 1); + } + } + + #[test] + fn test_of_to_mdf() { + for i in range_inclusive(0u32, 8192) { + let of = Of(i); + assert_eq!(of.valid(), of.to_mdf().valid()); + } + } + + #[test] + fn test_mdf_to_of() { + for i in range_inclusive(0u32, 8192) { + let mdf = Mdf(i); + assert_eq!(mdf.valid(), mdf.to_of().valid()); + } + } + + #[test] + fn test_of_to_mdf_to_of() { + for i in range_inclusive(0u32, 8192) { + let of = Of(i); + if of.valid() { + assert_eq!(of, of.to_mdf().to_of()); + } + } + } + + #[test] + fn test_mdf_to_of_to_mdf() { + for i in range_inclusive(0u32, 8192) { + let mdf = Mdf(i); + if mdf.valid() { + assert_eq!(mdf, mdf.to_of().to_mdf()); + } + } + } + } +} + diff -Nru cargo-0.17.0/vendor/chrono-0.2.25/src/naive/datetime.rs cargo-0.19.0/vendor/chrono-0.2.25/src/naive/datetime.rs --- cargo-0.17.0/vendor/chrono-0.2.25/src/naive/datetime.rs 1970-01-01 00:00:00.000000000 +0000 +++ 
cargo-0.19.0/vendor/chrono-0.2.25/src/naive/datetime.rs 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,1134 @@ +// This is a part of rust-chrono. +// Copyright (c) 2014-2015, Kang Seonghoon. +// See README.md and LICENSE.txt for details. + +//! ISO 8601 date and time without timezone. + +use std::{str, fmt, hash}; +use std::ops::{Add, Sub}; +use num::traits::ToPrimitive; + +use {Weekday, Timelike, Datelike}; +use div::div_mod_floor; +use duration::Duration; +use naive::time::NaiveTime; +use naive::date::NaiveDate; +use format::{Item, Numeric, Pad, Fixed}; +use format::{parse, Parsed, ParseError, ParseResult, DelayedFormat, StrftimeItems}; + +/// ISO 8601 combined date and time without timezone. +#[derive(PartialEq, Eq, PartialOrd, Ord, Copy, Clone)] +pub struct NaiveDateTime { + date: NaiveDate, + time: NaiveTime, +} + +impl NaiveDateTime { + /// Makes a new `NaiveDateTime` from date and time components. + /// Equivalent to `date.and_time(time)` and many other helper constructors on `NaiveDate`. + #[inline] + pub fn new(date: NaiveDate, time: NaiveTime) -> NaiveDateTime { + NaiveDateTime { date: date, time: time } + } + + /// Makes a new `NaiveDateTime` from the number of non-leap seconds + /// since the midnight UTC on January 1, 1970 (aka "UNIX timestamp") + /// and the number of nanoseconds since the last whole non-leap second. + /// + /// Panics on the out-of-range number of seconds and/or invalid nanosecond. + #[inline] + pub fn from_timestamp(secs: i64, nsecs: u32) -> NaiveDateTime { + let datetime = NaiveDateTime::from_timestamp_opt(secs, nsecs); + datetime.expect("invalid or out-of-range datetime") + } + + /// Makes a new `NaiveDateTime` from the number of non-leap seconds + /// since the midnight UTC on January 1, 1970 (aka "UNIX timestamp") + /// and the number of nanoseconds since the last whole non-leap second. + /// + /// Returns `None` on the out-of-range number of seconds and/or invalid nanosecond. 
+ #[inline] + pub fn from_timestamp_opt(secs: i64, nsecs: u32) -> Option { + let (days, secs) = div_mod_floor(secs, 86400); + let date = days.to_i32().and_then(|days| days.checked_add(719163)) + .and_then(|days_ce| NaiveDate::from_num_days_from_ce_opt(days_ce)); + let time = NaiveTime::from_num_seconds_from_midnight_opt(secs as u32, nsecs); + match (date, time) { + (Some(date), Some(time)) => Some(NaiveDateTime { date: date, time: time }), + (_, _) => None, + } + } + + /// *Deprecated:* Same to [`NaiveDateTime::from_timestamp`](#method.from_timestamp). + #[inline] + pub fn from_num_seconds_from_unix_epoch(secs: i64, nsecs: u32) -> NaiveDateTime { + NaiveDateTime::from_timestamp(secs, nsecs) + } + + /// *Deprecated:* Same to [`NaiveDateTime::from_timestamp_opt`](#method.from_timestamp_opt). + #[inline] + pub fn from_num_seconds_from_unix_epoch_opt(secs: i64, nsecs: u32) -> Option { + NaiveDateTime::from_timestamp_opt(secs, nsecs) + } + + /// Parses a string with the specified format string and returns a new `NaiveDateTime`. + /// See the [`format::strftime` module](../../format/strftime/index.html) + /// on the supported escape sequences. + pub fn parse_from_str(s: &str, fmt: &str) -> ParseResult { + let mut parsed = Parsed::new(); + try!(parse(&mut parsed, s, StrftimeItems::new(fmt))); + parsed.to_naive_datetime_with_offset(0) // no offset adjustment + } + + /// Retrieves a date component. + #[inline] + pub fn date(&self) -> NaiveDate { + self.date + } + + /// Retrieves a time component. + #[inline] + pub fn time(&self) -> NaiveTime { + self.time + } + + /// Returns the number of non-leap seconds since the midnight on January 1, 1970. + /// + /// Note that this does *not* account for the timezone! + /// The true "UNIX timestamp" would count seconds since the midnight *UTC* on the epoch. 
+ #[inline] + pub fn timestamp(&self) -> i64 { + let ndays = self.date.num_days_from_ce() as i64; + let nseconds = self.time.num_seconds_from_midnight() as i64; + (ndays - 719163) * 86400 + nseconds + } + + /// Returns the number of milliseconds since the last whole non-leap second. + /// + /// The return value ranges from 0 to 999, + /// or for [leap seconds](../time/index.html#leap-second-handling), to 1,999. + #[inline] + pub fn timestamp_subsec_millis(&self) -> u32 { + self.timestamp_subsec_nanos() / 1_000_000 + } + + /// Returns the number of microseconds since the last whole non-leap second. + /// + /// The return value ranges from 0 to 999,999, + /// or for [leap seconds](../time/index.html#leap-second-handling), to 1,999,999. + #[inline] + pub fn timestamp_subsec_micros(&self) -> u32 { + self.timestamp_subsec_nanos() / 1_000 + } + + /// Returns the number of nanoseconds since the last whole non-leap second. + /// + /// The return value ranges from 0 to 999,999,999, + /// or for [leap seconds](../time/index.html#leap-second-handling), to 1,999,999,999. + #[inline] + pub fn timestamp_subsec_nanos(&self) -> u32 { + self.time.nanosecond() + } + + /// *Deprecated:* Same to [`NaiveDateTime::timestamp`](#method.timestamp). + #[inline] + pub fn num_seconds_from_unix_epoch(&self) -> i64 { + self.timestamp() + } + + /// Adds given `Duration` to the current date and time. + /// + /// Returns `None` when it will result in overflow. + pub fn checked_add(self, rhs: Duration) -> Option { + // Duration does not directly give its parts, so we need some additional calculations. 
+ let days = rhs.num_days(); + let nanos = (rhs - Duration::days(days)).num_nanoseconds().unwrap(); + debug_assert!(Duration::days(days) + Duration::nanoseconds(nanos) == rhs); + debug_assert!(-86400_000_000_000 < nanos && nanos < 86400_000_000_000); + + let mut date = try_opt!(self.date.checked_add(Duration::days(days))); + let time = self.time + Duration::nanoseconds(nanos); + + // time always wraps around, but date needs to be adjusted for overflow. + if nanos < 0 && time > self.time { + date = try_opt!(date.pred_opt()); + } else if nanos > 0 && time < self.time { + date = try_opt!(date.succ_opt()); + } + Some(NaiveDateTime { date: date, time: time }) + } + + /// Subtracts given `Duration` from the current date and time. + /// + /// Returns `None` when it will result in overflow. + pub fn checked_sub(self, rhs: Duration) -> Option { + // Duration does not directly give its parts, so we need some additional calculations. + let days = rhs.num_days(); + let nanos = (rhs - Duration::days(days)).num_nanoseconds().unwrap(); + debug_assert!(Duration::days(days) + Duration::nanoseconds(nanos) == rhs); + debug_assert!(-86400_000_000_000 < nanos && nanos < 86400_000_000_000); + + let mut date = try_opt!(self.date.checked_sub(Duration::days(days))); + let time = self.time - Duration::nanoseconds(nanos); + + // time always wraps around, but date needs to be adjusted for overflow. + if nanos > 0 && time > self.time { + date = try_opt!(date.pred_opt()); + } else if nanos < 0 && time < self.time { + date = try_opt!(date.succ_opt()); + } + Some(NaiveDateTime { date: date, time: time }) + } + + /// Formats the combined date and time with the specified formatting items. + #[inline] + pub fn format_with_items<'a, I>(&self, items: I) -> DelayedFormat + where I: Iterator> + Clone { + DelayedFormat::new(Some(self.date.clone()), Some(self.time.clone()), items) + } + + /// Formats the combined date and time with the specified format string. 
+ /// See the [`format::strftime` module](../../format/strftime/index.html) + /// on the supported escape sequences. + #[inline] + pub fn format<'a>(&self, fmt: &'a str) -> DelayedFormat> { + self.format_with_items(StrftimeItems::new(fmt)) + } +} + +impl Datelike for NaiveDateTime { + /// Returns the year number in the [calendar date](./index.html#calendar-date). + /// + /// See also the [`NaiveDate::year`](../date/struct.NaiveDate.html#method.year) method. + /// + /// # Example + /// + /// ~~~~ + /// use chrono::{NaiveDate, NaiveDateTime, Datelike}; + /// + /// let dt: NaiveDateTime = NaiveDate::from_ymd(2015, 9, 25).and_hms(12, 34, 56); + /// assert_eq!(dt.year(), 2015); + /// ~~~~ + #[inline] + fn year(&self) -> i32 { + self.date.year() + } + + /// Returns the month number starting from 1. + /// + /// The return value ranges from 1 to 12. + /// + /// See also the [`NaiveDate::month`](../date/struct.NaiveDate.html#method.month) method. + /// + /// # Example + /// + /// ~~~~ + /// use chrono::{NaiveDate, NaiveDateTime, Datelike}; + /// + /// let dt: NaiveDateTime = NaiveDate::from_ymd(2015, 9, 25).and_hms(12, 34, 56); + /// assert_eq!(dt.month(), 9); + /// ~~~~ + #[inline] + fn month(&self) -> u32 { + self.date.month() + } + + /// Returns the month number starting from 0. + /// + /// The return value ranges from 0 to 11. + /// + /// See also the [`NaiveDate::month0`](../date/struct.NaiveDate.html#method.month0) method. + /// + /// # Example + /// + /// ~~~~ + /// use chrono::{NaiveDate, NaiveDateTime, Datelike}; + /// + /// let dt: NaiveDateTime = NaiveDate::from_ymd(2015, 9, 25).and_hms(12, 34, 56); + /// assert_eq!(dt.month0(), 8); + /// ~~~~ + #[inline] + fn month0(&self) -> u32 { + self.date.month0() + } + + /// Returns the day of month starting from 1. + /// + /// The return value ranges from 1 to 31. (The last day of month differs by months.) + /// + /// See also the [`NaiveDate::day`](../date/struct.NaiveDate.html#method.day) method. 
+ /// + /// # Example + /// + /// ~~~~ + /// use chrono::{NaiveDate, NaiveDateTime, Datelike}; + /// + /// let dt: NaiveDateTime = NaiveDate::from_ymd(2015, 9, 25).and_hms(12, 34, 56); + /// assert_eq!(dt.day(), 25); + /// ~~~~ + #[inline] + fn day(&self) -> u32 { + self.date.day() + } + + /// Returns the day of month starting from 0. + /// + /// The return value ranges from 0 to 30. (The last day of month differs by months.) + /// + /// See also the [`NaiveDate::day0`](../date/struct.NaiveDate.html#method.day0) method. + /// + /// # Example + /// + /// ~~~~ + /// use chrono::{NaiveDate, NaiveDateTime, Datelike}; + /// + /// let dt: NaiveDateTime = NaiveDate::from_ymd(2015, 9, 25).and_hms(12, 34, 56); + /// assert_eq!(dt.day0(), 24); + /// ~~~~ + #[inline] + fn day0(&self) -> u32 { + self.date.day0() + } + + /// Returns the day of year starting from 1. + /// + /// The return value ranges from 1 to 366. (The last day of year differs by years.) + /// + /// See also the [`NaiveDate::ordinal`](../date/struct.NaiveDate.html#method.ordinal) method. + /// + /// # Example + /// + /// ~~~~ + /// use chrono::{NaiveDate, NaiveDateTime, Datelike}; + /// + /// let dt: NaiveDateTime = NaiveDate::from_ymd(2015, 9, 25).and_hms(12, 34, 56); + /// assert_eq!(dt.ordinal(), 268); + /// ~~~~ + #[inline] + fn ordinal(&self) -> u32 { + self.date.ordinal() + } + + /// Returns the day of year starting from 0. + /// + /// The return value ranges from 0 to 365. (The last day of year differs by years.) + /// + /// See also the [`NaiveDate::ordinal0`](../date/struct.NaiveDate.html#method.ordinal0) method. + /// + /// # Example + /// + /// ~~~~ + /// use chrono::{NaiveDate, NaiveDateTime, Datelike}; + /// + /// let dt: NaiveDateTime = NaiveDate::from_ymd(2015, 9, 25).and_hms(12, 34, 56); + /// assert_eq!(dt.ordinal0(), 267); + /// ~~~~ + #[inline] + fn ordinal0(&self) -> u32 { + self.date.ordinal0() + } + + /// Returns the day of week. 
+ /// + /// See also the [`NaiveDate::weekday`](../date/struct.NaiveDate.html#method.weekday) method. + /// + /// # Example + /// + /// ~~~~ + /// use chrono::{NaiveDate, NaiveDateTime, Datelike, Weekday}; + /// + /// let dt: NaiveDateTime = NaiveDate::from_ymd(2015, 9, 25).and_hms(12, 34, 56); + /// assert_eq!(dt.weekday(), Weekday::Fri); + /// ~~~~ + #[inline] + fn weekday(&self) -> Weekday { + self.date.weekday() + } + + #[inline] + fn isoweekdate(&self) -> (i32, u32, Weekday) { + self.date.isoweekdate() + } + + /// Makes a new `NaiveDateTime` with the year number changed. + /// + /// Returns `None` when the resulting `NaiveDateTime` would be invalid. + /// + /// See also the + /// [`NaiveDate::with_year`](../date/struct.NaiveDate.html#method.with_year) method. + /// + /// # Example + /// + /// ~~~~ + /// use chrono::{NaiveDate, NaiveDateTime, Datelike}; + /// + /// let dt: NaiveDateTime = NaiveDate::from_ymd(2015, 9, 25).and_hms(12, 34, 56); + /// assert_eq!(dt.with_year(2016), Some(NaiveDate::from_ymd(2016, 9, 25).and_hms(12, 34, 56))); + /// assert_eq!(dt.with_year(-308), Some(NaiveDate::from_ymd(-308, 9, 25).and_hms(12, 34, 56))); + /// ~~~~ + #[inline] + fn with_year(&self, year: i32) -> Option { + self.date.with_year(year).map(|d| NaiveDateTime { date: d, ..*self }) + } + + /// Makes a new `NaiveDateTime` with the month number (starting from 1) changed. + /// + /// Returns `None` when the resulting `NaiveDateTime` would be invalid. + /// + /// See also the + /// [`NaiveDate::with_month`](../date/struct.NaiveDate.html#method.with_month) method. 
+ /// + /// # Example + /// + /// ~~~~ + /// use chrono::{NaiveDate, NaiveDateTime, Datelike}; + /// + /// let dt: NaiveDateTime = NaiveDate::from_ymd(2015, 9, 30).and_hms(12, 34, 56); + /// assert_eq!(dt.with_month(10), Some(NaiveDate::from_ymd(2015, 10, 30).and_hms(12, 34, 56))); + /// assert_eq!(dt.with_month(13), None); // no month 13 + /// assert_eq!(dt.with_month(2), None); // no February 30 + /// ~~~~ + #[inline] + fn with_month(&self, month: u32) -> Option { + self.date.with_month(month).map(|d| NaiveDateTime { date: d, ..*self }) + } + + /// Makes a new `NaiveDateTime` with the month number (starting from 0) changed. + /// + /// Returns `None` when the resulting `NaiveDateTime` would be invalid. + /// + /// See also the + /// [`NaiveDate::with_month0`](../date/struct.NaiveDate.html#method.with_month0) method. + /// + /// # Example + /// + /// ~~~~ + /// use chrono::{NaiveDate, NaiveDateTime, Datelike}; + /// + /// let dt: NaiveDateTime = NaiveDate::from_ymd(2015, 9, 30).and_hms(12, 34, 56); + /// assert_eq!(dt.with_month0(9), Some(NaiveDate::from_ymd(2015, 10, 30).and_hms(12, 34, 56))); + /// assert_eq!(dt.with_month0(12), None); // no month 13 + /// assert_eq!(dt.with_month0(1), None); // no February 30 + /// ~~~~ + #[inline] + fn with_month0(&self, month0: u32) -> Option { + self.date.with_month0(month0).map(|d| NaiveDateTime { date: d, ..*self }) + } + + /// Makes a new `NaiveDateTime` with the day of month (starting from 1) changed. + /// + /// Returns `None` when the resulting `NaiveDateTime` would be invalid. + /// + /// See also the + /// [`NaiveDate::with_day`](../date/struct.NaiveDate.html#method.with_day) method. 
+ /// + /// # Example + /// + /// ~~~~ + /// use chrono::{NaiveDate, NaiveDateTime, Datelike}; + /// + /// let dt: NaiveDateTime = NaiveDate::from_ymd(2015, 9, 8).and_hms(12, 34, 56); + /// assert_eq!(dt.with_day(30), Some(NaiveDate::from_ymd(2015, 9, 30).and_hms(12, 34, 56))); + /// assert_eq!(dt.with_day(31), None); // no September 31 + /// ~~~~ + #[inline] + fn with_day(&self, day: u32) -> Option { + self.date.with_day(day).map(|d| NaiveDateTime { date: d, ..*self }) + } + + /// Makes a new `NaiveDateTime` with the day of month (starting from 0) changed. + /// + /// Returns `None` when the resulting `NaiveDateTime` would be invalid. + /// + /// See also the + /// [`NaiveDate::with_day0`](../date/struct.NaiveDate.html#method.with_day0) method. + /// + /// # Example + /// + /// ~~~~ + /// use chrono::{NaiveDate, NaiveDateTime, Datelike}; + /// + /// let dt: NaiveDateTime = NaiveDate::from_ymd(2015, 9, 8).and_hms(12, 34, 56); + /// assert_eq!(dt.with_day0(29), Some(NaiveDate::from_ymd(2015, 9, 30).and_hms(12, 34, 56))); + /// assert_eq!(dt.with_day0(30), None); // no September 31 + /// ~~~~ + #[inline] + fn with_day0(&self, day0: u32) -> Option { + self.date.with_day0(day0).map(|d| NaiveDateTime { date: d, ..*self }) + } + + /// Makes a new `NaiveDateTime` with the day of year (starting from 1) changed. + /// + /// Returns `None` when the resulting `NaiveDateTime` would be invalid. + /// + /// See also the + /// [`NaiveDate::with_ordinal`](../date/struct.NaiveDate.html#method.with_ordinal) method. 
+ /// + /// # Example + /// + /// ~~~~ + /// use chrono::{NaiveDate, NaiveDateTime, Datelike}; + /// + /// let dt: NaiveDateTime = NaiveDate::from_ymd(2015, 9, 8).and_hms(12, 34, 56); + /// assert_eq!(dt.with_ordinal(60), + /// Some(NaiveDate::from_ymd(2015, 3, 1).and_hms(12, 34, 56))); + /// assert_eq!(dt.with_ordinal(366), None); // 2015 had only 365 days + /// + /// let dt: NaiveDateTime = NaiveDate::from_ymd(2016, 9, 8).and_hms(12, 34, 56); + /// assert_eq!(dt.with_ordinal(60), + /// Some(NaiveDate::from_ymd(2016, 2, 29).and_hms(12, 34, 56))); + /// assert_eq!(dt.with_ordinal(366), + /// Some(NaiveDate::from_ymd(2016, 12, 31).and_hms(12, 34, 56))); + /// ~~~~ + #[inline] + fn with_ordinal(&self, ordinal: u32) -> Option { + self.date.with_ordinal(ordinal).map(|d| NaiveDateTime { date: d, ..*self }) + } + + /// Makes a new `NaiveDateTime` with the day of year (starting from 0) changed. + /// + /// Returns `None` when the resulting `NaiveDateTime` would be invalid. + /// + /// See also the + /// [`NaiveDate::with_ordinal0`](../date/struct.NaiveDate.html#method.with_ordinal0) method. 
+ /// + /// # Example + /// + /// ~~~~ + /// use chrono::{NaiveDate, NaiveDateTime, Datelike}; + /// + /// let dt: NaiveDateTime = NaiveDate::from_ymd(2015, 9, 8).and_hms(12, 34, 56); + /// assert_eq!(dt.with_ordinal0(59), + /// Some(NaiveDate::from_ymd(2015, 3, 1).and_hms(12, 34, 56))); + /// assert_eq!(dt.with_ordinal0(365), None); // 2015 had only 365 days + /// + /// let dt: NaiveDateTime = NaiveDate::from_ymd(2016, 9, 8).and_hms(12, 34, 56); + /// assert_eq!(dt.with_ordinal0(59), + /// Some(NaiveDate::from_ymd(2016, 2, 29).and_hms(12, 34, 56))); + /// assert_eq!(dt.with_ordinal0(365), + /// Some(NaiveDate::from_ymd(2016, 12, 31).and_hms(12, 34, 56))); + /// ~~~~ + #[inline] + fn with_ordinal0(&self, ordinal0: u32) -> Option { + self.date.with_ordinal0(ordinal0).map(|d| NaiveDateTime { date: d, ..*self }) + } +} + +impl Timelike for NaiveDateTime { + /// Returns the hour number from 0 to 23. + /// + /// See also the [`NaiveTime::hour`](../time/struct.NaiveTime.html#method.hour) method. + /// + /// # Example + /// + /// ~~~~ + /// use chrono::{NaiveDate, NaiveDateTime, Timelike}; + /// + /// let dt: NaiveDateTime = NaiveDate::from_ymd(2015, 9, 8).and_hms_milli(12, 34, 56, 789); + /// assert_eq!(dt.hour(), 12); + /// ~~~~ + #[inline] + fn hour(&self) -> u32 { + self.time.hour() + } + + /// Returns the minute number from 0 to 59. + /// + /// See also the [`NaiveTime::minute`](../time/struct.NaiveTime.html#method.minute) method. + /// + /// # Example + /// + /// ~~~~ + /// use chrono::{NaiveDate, NaiveDateTime, Timelike}; + /// + /// let dt: NaiveDateTime = NaiveDate::from_ymd(2015, 9, 8).and_hms_milli(12, 34, 56, 789); + /// assert_eq!(dt.minute(), 34); + /// ~~~~ + #[inline] + fn minute(&self) -> u32 { + self.time.minute() + } + + /// Returns the second number from 0 to 59. + /// + /// See also the [`NaiveTime::second`](../time/struct.NaiveTime.html#method.second) method. 
+ /// + /// # Example + /// + /// ~~~~ + /// use chrono::{NaiveDate, NaiveDateTime, Timelike}; + /// + /// let dt: NaiveDateTime = NaiveDate::from_ymd(2015, 9, 8).and_hms_milli(12, 34, 56, 789); + /// assert_eq!(dt.second(), 56); + /// ~~~~ + #[inline] + fn second(&self) -> u32 { + self.time.second() + } + + /// Returns the number of nanoseconds since the whole non-leap second. + /// The range from 1,000,000,000 to 1,999,999,999 represents + /// the [leap second](./naive/time/index.html#leap-second-handling). + /// + /// See also the + /// [`NaiveTime::nanosecond`](../time/struct.NaiveTime.html#method.nanosecond) method. + /// + /// # Example + /// + /// ~~~~ + /// use chrono::{NaiveDate, NaiveDateTime, Timelike}; + /// + /// let dt: NaiveDateTime = NaiveDate::from_ymd(2015, 9, 8).and_hms_milli(12, 34, 56, 789); + /// assert_eq!(dt.nanosecond(), 789_000_000); + /// ~~~~ + #[inline] + fn nanosecond(&self) -> u32 { + self.time.nanosecond() + } + + /// Makes a new `NaiveDateTime` with the hour number changed. + /// + /// Returns `None` when the resulting `NaiveDateTime` would be invalid. + /// + /// See also the + /// [`NaiveTime::with_hour`](../time/struct.NaiveTime.html#method.with_hour) method. + /// + /// # Example + /// + /// ~~~~ + /// use chrono::{NaiveDate, NaiveDateTime, Timelike}; + /// + /// let dt: NaiveDateTime = NaiveDate::from_ymd(2015, 9, 8).and_hms_milli(12, 34, 56, 789); + /// assert_eq!(dt.with_hour(7), + /// Some(NaiveDate::from_ymd(2015, 9, 8).and_hms_milli(7, 34, 56, 789))); + /// assert_eq!(dt.with_hour(24), None); + /// ~~~~ + #[inline] + fn with_hour(&self, hour: u32) -> Option { + self.time.with_hour(hour).map(|t| NaiveDateTime { time: t, ..*self }) + } + + /// Makes a new `NaiveDateTime` with the minute number changed. + /// + /// Returns `None` when the resulting `NaiveDateTime` would be invalid. + /// + /// See also the + /// [`NaiveTime::with_minute`](../time/struct.NaiveTime.html#method.with_minute) method. 
+ /// + /// # Example + /// + /// ~~~~ + /// use chrono::{NaiveDate, NaiveDateTime, Timelike}; + /// + /// let dt: NaiveDateTime = NaiveDate::from_ymd(2015, 9, 8).and_hms_milli(12, 34, 56, 789); + /// assert_eq!(dt.with_minute(45), + /// Some(NaiveDate::from_ymd(2015, 9, 8).and_hms_milli(12, 45, 56, 789))); + /// assert_eq!(dt.with_minute(60), None); + /// ~~~~ + #[inline] + fn with_minute(&self, min: u32) -> Option { + self.time.with_minute(min).map(|t| NaiveDateTime { time: t, ..*self }) + } + + /// Makes a new `NaiveDateTime` with the second number changed. + /// + /// Returns `None` when the resulting `NaiveDateTime` would be invalid. + /// As with the [`second`](#method.second) method, + /// the input range is restricted to 0 through 59. + /// + /// See also the + /// [`NaiveTime::with_second`](../time/struct.NaiveTime.html#method.with_second) method. + /// + /// # Example + /// + /// ~~~~ + /// use chrono::{NaiveDate, NaiveDateTime, Timelike}; + /// + /// let dt: NaiveDateTime = NaiveDate::from_ymd(2015, 9, 8).and_hms_milli(12, 34, 56, 789); + /// assert_eq!(dt.with_second(17), + /// Some(NaiveDate::from_ymd(2015, 9, 8).and_hms_milli(12, 34, 17, 789))); + /// assert_eq!(dt.with_second(60), None); + /// ~~~~ + #[inline] + fn with_second(&self, sec: u32) -> Option { + self.time.with_second(sec).map(|t| NaiveDateTime { time: t, ..*self }) + } + + /// Makes a new `NaiveDateTime` with nanoseconds since the whole non-leap second changed. + /// + /// Returns `None` when the resulting `NaiveDateTime` would be invalid. + /// As with the [`nanosecond`](#method.nanosecond) method, + /// the input range can exceed 1,000,000,000 for leap seconds. + /// + /// See also the + /// [`NaiveTime::with_nanosecond`](../time/struct.NaiveTime.html#method.with_nanosecond) + /// method. 
+ /// + /// # Example + /// + /// ~~~~ + /// use chrono::{NaiveDate, NaiveDateTime, Timelike}; + /// + /// let dt: NaiveDateTime = NaiveDate::from_ymd(2015, 9, 8).and_hms_milli(12, 34, 56, 789); + /// assert_eq!(dt.with_nanosecond(333_333_333), + /// Some(NaiveDate::from_ymd(2015, 9, 8).and_hms_nano(12, 34, 56, 333_333_333))); + /// assert_eq!(dt.with_nanosecond(1_333_333_333), // leap second + /// Some(NaiveDate::from_ymd(2015, 9, 8).and_hms_nano(12, 34, 56, 1_333_333_333))); + /// assert_eq!(dt.with_nanosecond(2_000_000_000), None); + /// ~~~~ + #[inline] + fn with_nanosecond(&self, nano: u32) -> Option { + self.time.with_nanosecond(nano).map(|t| NaiveDateTime { time: t, ..*self }) + } +} + +/// `NaiveDateTime` can be used as a key to the hash maps (in principle). +/// +/// Practically this also takes account of fractional seconds, so it is not recommended. +/// (For the obvious reason this also distinguishes leap seconds from non-leap seconds.) +impl hash::Hash for NaiveDateTime { + fn hash(&self, state: &mut H) { + self.date.hash(state); + self.time.hash(state); + } +} + +impl Add for NaiveDateTime { + type Output = NaiveDateTime; + + #[inline] + fn add(self, rhs: Duration) -> NaiveDateTime { + self.checked_add(rhs).expect("`NaiveDateTime + Duration` overflowed") + } +} + +impl Sub for NaiveDateTime { + type Output = Duration; + + fn sub(self, rhs: NaiveDateTime) -> Duration { + (self.date - rhs.date) + (self.time - rhs.time) + } +} + +impl Sub for NaiveDateTime { + type Output = NaiveDateTime; + + #[inline] + fn sub(self, rhs: Duration) -> NaiveDateTime { + self.checked_sub(rhs).expect("`NaiveDateTime - Duration` overflowed") + } +} + +impl fmt::Debug for NaiveDateTime { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{:?}T{:?}", self.date, self.time) + } +} + +impl fmt::Display for NaiveDateTime { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{} {}", self.date, self.time) + } +} + +impl str::FromStr for 
NaiveDateTime { + type Err = ParseError; + + fn from_str(s: &str) -> ParseResult { + const ITEMS: &'static [Item<'static>] = &[ + Item::Space(""), Item::Numeric(Numeric::Year, Pad::Zero), + Item::Space(""), Item::Literal("-"), + Item::Space(""), Item::Numeric(Numeric::Month, Pad::Zero), + Item::Space(""), Item::Literal("-"), + Item::Space(""), Item::Numeric(Numeric::Day, Pad::Zero), + Item::Space(""), Item::Literal("T"), // XXX shouldn't this be case-insensitive? + Item::Space(""), Item::Numeric(Numeric::Hour, Pad::Zero), + Item::Space(""), Item::Literal(":"), + Item::Space(""), Item::Numeric(Numeric::Minute, Pad::Zero), + Item::Space(""), Item::Literal(":"), + Item::Space(""), Item::Numeric(Numeric::Second, Pad::Zero), + Item::Fixed(Fixed::Nanosecond), Item::Space(""), + ]; + + let mut parsed = Parsed::new(); + try!(parse(&mut parsed, s, ITEMS.iter().cloned())); + parsed.to_naive_datetime_with_offset(0) + } +} + +#[cfg(feature = "rustc-serialize")] +mod rustc_serialize { + use super::NaiveDateTime; + use rustc_serialize::{Encodable, Encoder, Decodable, Decoder}; + + // TODO the current serialization format is NEVER intentionally defined. + // in the future it is likely to be redefined to more sane and reasonable format. 
+ + impl Encodable for NaiveDateTime { + fn encode(&self, s: &mut S) -> Result<(), S::Error> { + s.emit_struct("NaiveDateTime", 2, |s| { + try!(s.emit_struct_field("date", 0, |s| self.date.encode(s))); + try!(s.emit_struct_field("time", 1, |s| self.time.encode(s))); + Ok(()) + }) + } + } + + impl Decodable for NaiveDateTime { + fn decode(d: &mut D) -> Result { + d.read_struct("NaiveDateTime", 2, |d| { + let date = try!(d.read_struct_field("date", 0, Decodable::decode)); + let time = try!(d.read_struct_field("time", 1, Decodable::decode)); + Ok(NaiveDateTime::new(date, time)) + }) + } + } + + #[test] + fn test_encodable() { + use naive::date::{self, NaiveDate}; + use rustc_serialize::json::encode; + + assert_eq!( + encode(&NaiveDate::from_ymd(2016, 7, 8).and_hms_milli(9, 10, 48, 90)).ok(), + Some(r#"{"date":{"ymdf":16518115},"time":{"secs":33048,"frac":90000000}}"#.into())); + assert_eq!( + encode(&NaiveDate::from_ymd(2014, 7, 24).and_hms(12, 34, 6)).ok(), + Some(r#"{"date":{"ymdf":16501977},"time":{"secs":45246,"frac":0}}"#.into())); + assert_eq!( + encode(&NaiveDate::from_ymd(0, 1, 1).and_hms_milli(0, 0, 59, 1_000)).ok(), + Some(r#"{"date":{"ymdf":20},"time":{"secs":59,"frac":1000000000}}"#.into())); + assert_eq!( + encode(&NaiveDate::from_ymd(-1, 12, 31).and_hms_nano(23, 59, 59, 7)).ok(), + Some(r#"{"date":{"ymdf":-2341},"time":{"secs":86399,"frac":7}}"#.into())); + assert_eq!( + encode(&date::MIN.and_hms(0, 0, 0)).ok(), + Some(r#"{"date":{"ymdf":-2147483625},"time":{"secs":0,"frac":0}}"#.into())); + assert_eq!( + encode(&date::MAX.and_hms_nano(23, 59, 59, 1_999_999_999)).ok(), + Some(r#"{"date":{"ymdf":2147481311},"time":{"secs":86399,"frac":1999999999}}"#.into())); + } + + #[test] + fn test_decodable() { + use naive::date::{self, NaiveDate}; + use rustc_serialize::json; + + let decode = |s: &str| json::decode::(s); + + assert_eq!( + decode(r#"{"date":{"ymdf":16518115},"time":{"secs":33048,"frac":90000000}}"#).ok(), + Some(NaiveDate::from_ymd(2016, 7, 
8).and_hms_milli(9, 10, 48, 90))); + assert_eq!( + decode(r#"{"time":{"frac":0,"secs":45246},"date":{"ymdf":16501977}}"#).ok(), + Some(NaiveDate::from_ymd(2014, 7, 24).and_hms(12, 34, 6))); + assert_eq!( + decode(r#"{"date": {"ymdf": 20}, + "time": {"secs": 59, + "frac": 1000000000}}"#).ok(), + Some(NaiveDate::from_ymd(0, 1, 1).and_hms_milli(0, 0, 59, 1_000))); + assert_eq!( + decode(r#"{"date":{"ymdf":-2341},"time":{"secs":86399,"frac":7}}"#).ok(), + Some(NaiveDate::from_ymd(-1, 12, 31).and_hms_nano(23, 59, 59, 7))); + assert_eq!( + decode(r#"{"date":{"ymdf":-2147483625},"time":{"secs":0,"frac":0}}"#).ok(), + Some(date::MIN.and_hms(0, 0, 0))); + assert_eq!( + decode(r#"{"date":{"ymdf":2147481311},"time":{"secs":86399,"frac":1999999999}}"#).ok(), + Some(date::MAX.and_hms_nano(23, 59, 59, 1_999_999_999))); + + // bad formats + assert!(decode(r#"{"date":{},"time":{}}"#).is_err()); + assert!(decode(r#"{"date":{"ymdf":0},"time":{"secs":0,"frac":0}}"#).is_err()); + assert!(decode(r#"{"date":{"ymdf":20},"time":{"secs":86400,"frac":0}}"#).is_err()); + assert!(decode(r#"{"date":{"ymdf":20},"time":{"secs":0,"frac":-1}}"#).is_err()); + assert!(decode(r#"{"date":20,"time":{"secs":0,"frac":0}}"#).is_err()); + assert!(decode(r#"{"date":"2016-08-04","time":"01:02:03.456"}"#).is_err()); + assert!(decode(r#"{"date":{"ymdf":20}}"#).is_err()); + assert!(decode(r#"{"time":{"secs":0,"frac":0}}"#).is_err()); + assert!(decode(r#"{"ymdf":20}"#).is_err()); + assert!(decode(r#"{"secs":0,"frac":0}"#).is_err()); + assert!(decode(r#"{}"#).is_err()); + assert!(decode(r#"0"#).is_err()); + assert!(decode(r#"-1"#).is_err()); + assert!(decode(r#""string""#).is_err()); + assert!(decode(r#""2016-08-04T12:34:56""#).is_err()); // :( + assert!(decode(r#""2016-08-04T12:34:56.789""#).is_err()); // :( + assert!(decode(r#"null"#).is_err()); + } +} + +#[cfg(feature = "serde")] +mod serde { + use super::NaiveDateTime; + use serde::{ser, de}; + + // TODO not very optimized for space (binary formats would want 
something better) + + impl ser::Serialize for NaiveDateTime { + fn serialize(&self, serializer: &mut S) -> Result<(), S::Error> + where S: ser::Serializer + { + serializer.serialize_str(&format!("{:?}", self)) + } + } + + struct NaiveDateTimeVisitor; + + impl de::Visitor for NaiveDateTimeVisitor { + type Value = NaiveDateTime; + + fn visit_str(&mut self, value: &str) -> Result + where E: de::Error + { + value.parse().map_err(|err| E::custom(format!("{}", err))) + } + } + + impl de::Deserialize for NaiveDateTime { + fn deserialize(deserializer: &mut D) -> Result + where D: de::Deserializer + { + deserializer.deserialize(NaiveDateTimeVisitor) + } + } + + #[cfg(test)] extern crate serde_json; + + #[test] + fn test_serde_serialize() { + use naive::date::{self, NaiveDate}; + use self::serde_json::to_string; + + assert_eq!( + to_string(&NaiveDate::from_ymd(2016, 7, 8).and_hms_milli(9, 10, 48, 90)).ok(), + Some(r#""2016-07-08T09:10:48.090""#.into())); + assert_eq!( + to_string(&NaiveDate::from_ymd(2014, 7, 24).and_hms(12, 34, 6)).ok(), + Some(r#""2014-07-24T12:34:06""#.into())); + assert_eq!( + to_string(&NaiveDate::from_ymd(0, 1, 1).and_hms_milli(0, 0, 59, 1_000)).ok(), + Some(r#""0000-01-01T00:00:60""#.into())); + assert_eq!( + to_string(&NaiveDate::from_ymd(-1, 12, 31).and_hms_nano(23, 59, 59, 7)).ok(), + Some(r#""-0001-12-31T23:59:59.000000007""#.into())); + assert_eq!( + to_string(&date::MIN.and_hms(0, 0, 0)).ok(), + Some(r#""-262144-01-01T00:00:00""#.into())); + assert_eq!( + to_string(&date::MAX.and_hms_nano(23, 59, 59, 1_999_999_999)).ok(), + Some(r#""+262143-12-31T23:59:60.999999999""#.into())); + } + + #[test] + fn test_serde_deserialize() { + use naive::date::{self, NaiveDate}; + use self::serde_json::from_str; + + let from_str = |s: &str| serde_json::from_str::(s); + + assert_eq!( + from_str(r#""2016-07-08T09:10:48.090""#).ok(), + Some(NaiveDate::from_ymd(2016, 7, 8).and_hms_milli(9, 10, 48, 90))); + assert_eq!( + from_str(r#""2016-7-8T9:10:48.09""#).ok(), + 
Some(NaiveDate::from_ymd(2016, 7, 8).and_hms_milli(9, 10, 48, 90))); + assert_eq!( + from_str(r#""2014-07-24T12:34:06""#).ok(), + Some(NaiveDate::from_ymd(2014, 7, 24).and_hms(12, 34, 6))); + assert_eq!( + from_str(r#""0000-01-01T00:00:60""#).ok(), + Some(NaiveDate::from_ymd(0, 1, 1).and_hms_milli(0, 0, 59, 1_000))); + assert_eq!( + from_str(r#""0-1-1T0:0:60""#).ok(), + Some(NaiveDate::from_ymd(0, 1, 1).and_hms_milli(0, 0, 59, 1_000))); + assert_eq!( + from_str(r#""-0001-12-31T23:59:59.000000007""#).ok(), + Some(NaiveDate::from_ymd(-1, 12, 31).and_hms_nano(23, 59, 59, 7))); + assert_eq!( + from_str(r#""-262144-01-01T00:00:00""#).ok(), + Some(date::MIN.and_hms(0, 0, 0))); + assert_eq!( + from_str(r#""+262143-12-31T23:59:60.999999999""#).ok(), + Some(date::MAX.and_hms_nano(23, 59, 59, 1_999_999_999))); + assert_eq!( + from_str(r#""+262143-12-31T23:59:60.9999999999997""#).ok(), // excess digits are ignored + Some(date::MAX.and_hms_nano(23, 59, 59, 1_999_999_999))); + + // bad formats + assert!(from_str(r#""""#).is_err()); + assert!(from_str(r#""2016-07-08""#).is_err()); + assert!(from_str(r#""09:10:48.090""#).is_err()); + assert!(from_str(r#""20160708T091048.090""#).is_err()); + assert!(from_str(r#""2000-00-00T00:00:00""#).is_err()); + assert!(from_str(r#""2000-02-30T00:00:00""#).is_err()); + assert!(from_str(r#""2001-02-29T00:00:00""#).is_err()); + assert!(from_str(r#""2002-02-28T24:00:00""#).is_err()); + assert!(from_str(r#""2002-02-28T23:60:00""#).is_err()); + assert!(from_str(r#""2002-02-28T23:59:61""#).is_err()); + assert!(from_str(r#""2016-07-08T09:10:48,090""#).is_err()); + assert!(from_str(r#""2016-07-08 09:10:48.090""#).is_err()); + assert!(from_str(r#""2016-007-08T09:10:48.090""#).is_err()); + assert!(from_str(r#""yyyy-mm-ddThh:mm:ss.fffffffff""#).is_err()); + assert!(from_str(r#"0"#).is_err()); + assert!(from_str(r#"20160708000000"#).is_err()); + assert!(from_str(r#"{}"#).is_err()); + 
assert!(from_str(r#"{"date":{"ymdf":20},"time":{"secs":0,"frac":0}}"#).is_err()); // :( + assert!(from_str(r#"null"#).is_err()); + } +} + +#[cfg(test)] +mod tests { + use super::NaiveDateTime; + use Datelike; + use duration::Duration; + use naive::date as naive_date; + use naive::date::NaiveDate; + use std::i64; + + #[test] + fn test_datetime_from_timestamp() { + let from_timestamp = |secs| NaiveDateTime::from_timestamp_opt(secs, 0); + let ymdhms = |y,m,d,h,n,s| NaiveDate::from_ymd(y,m,d).and_hms(h,n,s); + assert_eq!(from_timestamp(-1), Some(ymdhms(1969, 12, 31, 23, 59, 59))); + assert_eq!(from_timestamp(0), Some(ymdhms(1970, 1, 1, 0, 0, 0))); + assert_eq!(from_timestamp(1), Some(ymdhms(1970, 1, 1, 0, 0, 1))); + assert_eq!(from_timestamp(1_000_000_000), Some(ymdhms(2001, 9, 9, 1, 46, 40))); + assert_eq!(from_timestamp(0x7fffffff), Some(ymdhms(2038, 1, 19, 3, 14, 7))); + assert_eq!(from_timestamp(i64::MIN), None); + assert_eq!(from_timestamp(i64::MAX), None); + } + + #[test] + fn test_datetime_add() { + fn check((y,m,d,h,n,s): (i32,u32,u32,u32,u32,u32), rhs: Duration, + result: Option<(i32,u32,u32,u32,u32,u32)>) { + let lhs = NaiveDate::from_ymd(y, m, d).and_hms(h, n, s); + let sum = result.map(|(y,m,d,h,n,s)| NaiveDate::from_ymd(y, m, d).and_hms(h, n, s)); + assert_eq!(lhs.checked_add(rhs), sum); + assert_eq!(lhs.checked_sub(-rhs), sum); + }; + + check((2014,5,6, 7,8,9), Duration::seconds(3600 + 60 + 1), Some((2014,5,6, 8,9,10))); + check((2014,5,6, 7,8,9), Duration::seconds(-(3600 + 60 + 1)), Some((2014,5,6, 6,7,8))); + check((2014,5,6, 7,8,9), Duration::seconds(86399), Some((2014,5,7, 7,8,8))); + check((2014,5,6, 7,8,9), Duration::seconds(86400 * 10), Some((2014,5,16, 7,8,9))); + check((2014,5,6, 7,8,9), Duration::seconds(-86400 * 10), Some((2014,4,26, 7,8,9))); + check((2014,5,6, 7,8,9), Duration::seconds(86400 * 10), Some((2014,5,16, 7,8,9))); + + // overflow check + // assumes that we have correct values for MAX/MIN_DAYS_FROM_YEAR_0 from `naive::date`. 
+ // (they are private constants, but the equivalence is tested in that module.) + let max_days_from_year_0 = naive_date::MAX - NaiveDate::from_ymd(0,1,1); + check((0,1,1, 0,0,0), max_days_from_year_0, Some((naive_date::MAX.year(),12,31, 0,0,0))); + check((0,1,1, 0,0,0), max_days_from_year_0 + Duration::seconds(86399), + Some((naive_date::MAX.year(),12,31, 23,59,59))); + check((0,1,1, 0,0,0), max_days_from_year_0 + Duration::seconds(86400), None); + check((0,1,1, 0,0,0), Duration::max_value(), None); + + let min_days_from_year_0 = naive_date::MIN - NaiveDate::from_ymd(0,1,1); + check((0,1,1, 0,0,0), min_days_from_year_0, Some((naive_date::MIN.year(),1,1, 0,0,0))); + check((0,1,1, 0,0,0), min_days_from_year_0 - Duration::seconds(1), None); + check((0,1,1, 0,0,0), Duration::min_value(), None); + } + + #[test] + fn test_datetime_sub() { + let ymdhms = |y,m,d,h,n,s| NaiveDate::from_ymd(y,m,d).and_hms(h,n,s); + assert_eq!(ymdhms(2014, 5, 6, 7, 8, 9) - ymdhms(2014, 5, 6, 7, 8, 9), Duration::zero()); + assert_eq!(ymdhms(2014, 5, 6, 7, 8, 10) - ymdhms(2014, 5, 6, 7, 8, 9), + Duration::seconds(1)); + assert_eq!(ymdhms(2014, 5, 6, 7, 8, 9) - ymdhms(2014, 5, 6, 7, 8, 10), + Duration::seconds(-1)); + assert_eq!(ymdhms(2014, 5, 7, 7, 8, 9) - ymdhms(2014, 5, 6, 7, 8, 10), + Duration::seconds(86399)); + assert_eq!(ymdhms(2001, 9, 9, 1, 46, 39) - ymdhms(1970, 1, 1, 0, 0, 0), + Duration::seconds(999_999_999)); + } + + #[test] + fn test_datetime_timestamp() { + let to_timestamp = |y,m,d,h,n,s| NaiveDate::from_ymd(y,m,d).and_hms(h,n,s).timestamp(); + assert_eq!(to_timestamp(1969, 12, 31, 23, 59, 59), -1); + assert_eq!(to_timestamp(1970, 1, 1, 0, 0, 0), 0); + assert_eq!(to_timestamp(1970, 1, 1, 0, 0, 1), 1); + assert_eq!(to_timestamp(2001, 9, 9, 1, 46, 40), 1_000_000_000); + assert_eq!(to_timestamp(2038, 1, 19, 3, 14, 7), 0x7fffffff); + } + + #[test] + fn test_datetime_from_str() { + // valid cases + let valid = [ + "2015-2-18T23:16:9.15", + "-77-02-18T23:16:09", + " +82701 - 05 - 6 T 
15 : 9 : 60.898989898989 ", + ]; + for &s in &valid { + let d = match s.parse::() { + Ok(d) => d, + Err(e) => panic!("parsing `{}` has failed: {}", s, e) + }; + let s_ = format!("{:?}", d); + // `s` and `s_` may differ, but `s.parse()` and `s_.parse()` must be same + let d_ = match s_.parse::() { + Ok(d) => d, + Err(e) => panic!("`{}` is parsed into `{:?}`, but reparsing that has failed: {}", + s, d, e) + }; + assert!(d == d_, "`{}` is parsed into `{:?}`, but reparsed result \ + `{:?}` does not match", s, d, d_); + } + + // some invalid cases + // since `ParseErrorKind` is private, all we can do is to check if there was an error + assert!("".parse::().is_err()); + assert!("x".parse::().is_err()); + assert!("15".parse::().is_err()); + assert!("15:8:9".parse::().is_err()); + assert!("15-8-9".parse::().is_err()); + assert!("2015-15-15T15:15:15".parse::().is_err()); + assert!("2012-12-12T12:12:12x".parse::().is_err()); + assert!("2012-123-12T12:12:12".parse::().is_err()); + assert!("+ 82701-123-12T12:12:12".parse::().is_err()); + assert!("+802701-123-12T12:12:12".parse::().is_err()); // out-of-bound + } + + #[test] + fn test_datetime_parse_from_str() { + let ymdhms = |y,m,d,h,n,s| NaiveDate::from_ymd(y,m,d).and_hms(h,n,s); + assert_eq!(NaiveDateTime::parse_from_str("2014-5-7T12:34:56+09:30", "%Y-%m-%dT%H:%M:%S%z"), + Ok(ymdhms(2014, 5, 7, 12, 34, 56))); // ignore offset + assert_eq!(NaiveDateTime::parse_from_str("2015-W06-1 000000", "%G-W%V-%u%H%M%S"), + Ok(ymdhms(2015, 2, 2, 0, 0, 0))); + assert_eq!(NaiveDateTime::parse_from_str("Fri, 09 Aug 2013 23:54:35 GMT", + "%a, %d %b %Y %H:%M:%S GMT"), + Ok(ymdhms(2013, 8, 9, 23, 54, 35))); + assert!(NaiveDateTime::parse_from_str("Sat, 09 Aug 2013 23:54:35 GMT", + "%a, %d %b %Y %H:%M:%S GMT").is_err()); + assert!(NaiveDateTime::parse_from_str("2014-5-7 12:3456", "%Y-%m-%d %H:%M:%S").is_err()); + assert!(NaiveDateTime::parse_from_str("12:34:56", "%H:%M:%S").is_err()); // insufficient + } + + #[test] + fn test_datetime_format() { 
+ let dt = NaiveDate::from_ymd(2010, 9, 8).and_hms_milli(7, 6, 54, 321); + assert_eq!(dt.format("%c").to_string(), "Wed Sep 8 07:06:54 2010"); + assert_eq!(dt.format("%s").to_string(), "1283929614"); + assert_eq!(dt.format("%t%n%%%n%t").to_string(), "\t\n%\n\t"); + + // a horror of leap second: coming near to you. + let dt = NaiveDate::from_ymd(2012, 6, 30).and_hms_milli(23, 59, 59, 1_000); + assert_eq!(dt.format("%c").to_string(), "Sat Jun 30 23:59:60 2012"); + assert_eq!(dt.format("%s").to_string(), "1341100799"); // not 1341100800, it's intentional. + } + + #[test] + fn test_datetime_add_sub_invariant() { // issue #37 + let base = NaiveDate::from_ymd(2000, 1, 1).and_hms(0, 0, 0); + let t = -946684799990000; + let time = base + Duration::microseconds(t); + assert_eq!(t, (time - base).num_microseconds().unwrap()); + } +} diff -Nru cargo-0.17.0/vendor/chrono-0.2.25/src/naive/time.rs cargo-0.19.0/vendor/chrono-0.2.25/src/naive/time.rs --- cargo-0.17.0/vendor/chrono-0.2.25/src/naive/time.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/chrono-0.2.25/src/naive/time.rs 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,1164 @@ +// This is a part of rust-chrono. +// Copyright (c) 2014-2015, Kang Seonghoon. +// See README.md and LICENSE.txt for details. + +//! ISO 8601 time without timezone. +//! +//! # Leap Second Handling +//! +//! Since 1960s, the manmade atomic clock has been so accurate that +//! it is much more accurate than Earth's own motion. +//! It became desirable to define the civil time in terms of the atomic clock, +//! but that risks the desynchronization of the civil time from Earth. +//! To account for this, the designers of the Coordinated Universal Time (UTC) +//! made that the UTC should be kept within 0.9 seconds of the observed Earth-bound time. +//! When the mean solar day is longer than the ideal (86,400 seconds), +//! the error slowly accumulates and it is necessary to add a **leap second** +//! to slow the UTC down a bit. +//! 
(We may also remove a second to speed the UTC up a bit, but it never happened.) +//! The leap second, if any, follows 23:59:59 of June 30 or December 31 in the UTC. +//! +//! Fast forward to the 21st century, +//! we have seen 26 leap seconds from January 1972 to December 2015. +//! Yes, 26 seconds. Probably you can read this paragraph within 26 seconds. +//! But those 26 seconds, and possibly more in the future, are never predictable, +//! and whether to add a leap second or not is known only before 6 months. +//! Internet-based clocks (via NTP) do account for known leap seconds, +//! but the system API normally doesn't (and often can't, with no network connection) +//! and there is no reliable way to retrieve leap second information. +//! +//! Chrono does not try to accurately implement leap seconds; it is impossible. +//! Rather, **it allows for leap seconds but behaves as if there are *no other* leap seconds.** +//! Various time arithmetics will ignore any possible leap second(s) +//! except when the operand were actually a leap second. +//! The leap second is indicated via fractional seconds more than 1 second, +//! so values like `NaiveTime::from_hms_milli(23, 56, 4, 1_005)` are allowed; +//! that value would mean 5ms after the beginning of a leap second following 23:56:04. +//! Parsing and formatting will correctly handle times that look like leap seconds, +//! and you can then conveniently ignore leap seconds if you are not prepared for them. +//! +//! If you cannot tolerate this behavior, +//! you must use a separate `TimeZone` for the International Atomic Time (TAI). +//! TAI is like UTC but has no leap seconds, and thus slightly differs from UTC. +//! Chrono 0.2 does not provide such implementation, but it is planned for 0.3. 
+ +use std::{str, fmt, hash}; +use std::ops::{Add, Sub}; + +use Timelike; +use div::div_mod_floor; +use duration::Duration; +use format::{Item, Numeric, Pad, Fixed}; +use format::{parse, Parsed, ParseError, ParseResult, DelayedFormat, StrftimeItems}; + +/// ISO 8601 time without timezone. +/// Allows for the nanosecond precision and optional leap second representation. +/// +/// +/// Chrono has a notable policy on the [leap second handling](./index.html#leap-second-handling), +/// designed to be maximally useful for typical users. +#[derive(PartialEq, Eq, PartialOrd, Ord, Copy, Clone)] +pub struct NaiveTime { + secs: u32, + frac: u32, +} + +impl NaiveTime { + /// Makes a new `NaiveTime` from the serialized representation. + /// Used for serialization formats. + #[cfg(feature = "rustc-serialize")] + fn from_serialized(secs: u32, frac: u32) -> Option { + // check if the values are in the range + if secs >= 86400 { return None; } + if frac >= 2_000_000_000 { return None; } + + let time = NaiveTime { secs: secs, frac: frac }; + Some(time) + } + + /// Returns a serialized representation of this `NaiveDate`. + #[cfg(feature = "rustc-serialize")] + fn to_serialized(&self) -> (u32, u32) { + (self.secs, self.frac) + } + + /// Makes a new `NaiveTime` from hour, minute and second. + /// + /// No [leap second](./index.html#leap-second-handling) is allowed here; + /// use `NaiveTime::from_hms_*` methods with a subsecond parameter instead. + /// + /// Panics on invalid hour, minute and/or second. + /// + /// # Example + /// + /// ~~~~ + /// use chrono::{NaiveTime, Timelike}; + /// + /// let t = NaiveTime::from_hms(23, 56, 4); + /// assert_eq!(t.hour(), 23); + /// assert_eq!(t.minute(), 56); + /// assert_eq!(t.second(), 4); + /// assert_eq!(t.nanosecond(), 0); + /// ~~~~ + #[inline] + pub fn from_hms(hour: u32, min: u32, sec: u32) -> NaiveTime { + NaiveTime::from_hms_opt(hour, min, sec).expect("invalid time") + } + + /// Makes a new `NaiveTime` from hour, minute and second. 
+ /// + /// No [leap second](./index.html#leap-second-handling) is allowed here; + /// use `NaiveTime::from_hms_*_opt` methods with a subsecond parameter instead. + /// + /// Returns `None` on invalid hour, minute and/or second. + /// + /// # Example + /// + /// ~~~~ + /// use chrono::NaiveTime; + /// + /// let hms = |h,m,s| NaiveTime::from_hms_opt(h, m, s); + /// assert!(hms(0, 0, 0).is_some()); + /// assert!(hms(23, 59, 59).is_some()); + /// assert!(hms(24, 0, 0).is_none()); + /// assert!(hms(23, 60, 0).is_none()); + /// assert!(hms(23, 59, 60).is_none()); + /// ~~~~ + #[inline] + pub fn from_hms_opt(hour: u32, min: u32, sec: u32) -> Option { + NaiveTime::from_hms_nano_opt(hour, min, sec, 0) + } + + /// Makes a new `NaiveTime` from hour, minute, second and millisecond. + /// + /// The millisecond part can exceed 1,000 + /// in order to represent the [leap second](./index.html#leap-second-handling). + /// + /// Panics on invalid hour, minute, second and/or millisecond. + /// + /// # Example + /// + /// ~~~~ + /// use chrono::{NaiveTime, Timelike}; + /// + /// let t = NaiveTime::from_hms_milli(23, 56, 4, 12); + /// assert_eq!(t.hour(), 23); + /// assert_eq!(t.minute(), 56); + /// assert_eq!(t.second(), 4); + /// assert_eq!(t.nanosecond(), 12_000_000); + /// ~~~~ + #[inline] + pub fn from_hms_milli(hour: u32, min: u32, sec: u32, milli: u32) -> NaiveTime { + NaiveTime::from_hms_milli_opt(hour, min, sec, milli).expect("invalid time") + } + + /// Makes a new `NaiveTime` from hour, minute, second and millisecond. + /// + /// The millisecond part can exceed 1,000 + /// in order to represent the [leap second](./index.html#leap-second-handling). + /// + /// Returns `None` on invalid hour, minute, second and/or millisecond. 
+ /// + /// # Example + /// + /// ~~~~ + /// use chrono::NaiveTime; + /// + /// let hmsm = |h,m,s,milli| NaiveTime::from_hms_milli_opt(h, m, s, milli); + /// assert!(hmsm(0, 0, 0, 0).is_some()); + /// assert!(hmsm(23, 59, 59, 999).is_some()); + /// assert!(hmsm(23, 59, 59, 1_999).is_some()); // a leap second following 23:59:59 + /// assert!(hmsm(24, 0, 0, 0).is_none()); + /// assert!(hmsm(23, 60, 0, 0).is_none()); + /// assert!(hmsm(23, 59, 60, 0).is_none()); + /// assert!(hmsm(23, 59, 59, 2_000).is_none()); + /// ~~~~ + #[inline] + pub fn from_hms_milli_opt(hour: u32, min: u32, sec: u32, milli: u32) -> Option { + milli.checked_mul(1_000_000) + .and_then(|nano| NaiveTime::from_hms_nano_opt(hour, min, sec, nano)) + } + + /// Makes a new `NaiveTime` from hour, minute, second and microsecond. + /// + /// The microsecond part can exceed 1,000,000 + /// in order to represent the [leap second](./index.html#leap-second-handling). + /// + /// Panics on invalid hour, minute, second and/or microsecond. + /// + /// # Example + /// + /// ~~~~ + /// use chrono::{NaiveTime, Timelike}; + /// + /// let t = NaiveTime::from_hms_micro(23, 56, 4, 12_345); + /// assert_eq!(t.hour(), 23); + /// assert_eq!(t.minute(), 56); + /// assert_eq!(t.second(), 4); + /// assert_eq!(t.nanosecond(), 12_345_000); + /// ~~~~ + #[inline] + pub fn from_hms_micro(hour: u32, min: u32, sec: u32, micro: u32) -> NaiveTime { + NaiveTime::from_hms_micro_opt(hour, min, sec, micro).expect("invalid time") + } + + /// Makes a new `NaiveTime` from hour, minute, second and microsecond. + /// + /// The microsecond part can exceed 1,000,000 + /// in order to represent the [leap second](./index.html#leap-second-handling). + /// + /// Returns `None` on invalid hour, minute, second and/or microsecond. 
+ /// + /// # Example + /// + /// ~~~~ + /// use chrono::NaiveTime; + /// + /// let hmsu = |h,m,s,micro| NaiveTime::from_hms_micro_opt(h, m, s, micro); + /// assert!(hmsu(0, 0, 0, 0).is_some()); + /// assert!(hmsu(23, 59, 59, 999_999).is_some()); + /// assert!(hmsu(23, 59, 59, 1_999_999).is_some()); // a leap second following 23:59:59 + /// assert!(hmsu(24, 0, 0, 0).is_none()); + /// assert!(hmsu(23, 60, 0, 0).is_none()); + /// assert!(hmsu(23, 59, 60, 0).is_none()); + /// assert!(hmsu(23, 59, 59, 2_000_000).is_none()); + /// ~~~~ + #[inline] + pub fn from_hms_micro_opt(hour: u32, min: u32, sec: u32, micro: u32) -> Option { + micro.checked_mul(1_000) + .and_then(|nano| NaiveTime::from_hms_nano_opt(hour, min, sec, nano)) + } + + /// Makes a new `NaiveTime` from hour, minute, second and nanosecond. + /// + /// The nanosecond part can exceed 1,000,000,000 + /// in order to represent the [leap second](./index.html#leap-second-handling). + /// + /// Panics on invalid hour, minute, second and/or nanosecond. + /// + /// # Example + /// + /// ~~~~ + /// use chrono::{NaiveTime, Timelike}; + /// + /// let t = NaiveTime::from_hms_nano(23, 56, 4, 12_345_678); + /// assert_eq!(t.hour(), 23); + /// assert_eq!(t.minute(), 56); + /// assert_eq!(t.second(), 4); + /// assert_eq!(t.nanosecond(), 12_345_678); + /// ~~~~ + #[inline] + pub fn from_hms_nano(hour: u32, min: u32, sec: u32, nano: u32) -> NaiveTime { + NaiveTime::from_hms_nano_opt(hour, min, sec, nano).expect("invalid time") + } + + /// Makes a new `NaiveTime` from hour, minute, second and nanosecond. + /// + /// The nanosecond part can exceed 1,000,000,000 + /// in order to represent the [leap second](./index.html#leap-second-handling). + /// + /// Returns `None` on invalid hour, minute, second and/or nanosecond. 
+ /// + /// # Example + /// + /// ~~~~ + /// use chrono::NaiveTime; + /// + /// let hmsn = |h,m,s,nano| NaiveTime::from_hms_nano_opt(h, m, s, nano); + /// assert!(hmsn(0, 0, 0, 0).is_some()); + /// assert!(hmsn(23, 59, 59, 999_999_999).is_some()); + /// assert!(hmsn(23, 59, 59, 1_999_999_999).is_some()); // a leap second following 23:59:59 + /// assert!(hmsn(24, 0, 0, 0).is_none()); + /// assert!(hmsn(23, 60, 0, 0).is_none()); + /// assert!(hmsn(23, 59, 60, 0).is_none()); + /// assert!(hmsn(23, 59, 59, 2_000_000_000).is_none()); + /// ~~~~ + #[inline] + pub fn from_hms_nano_opt(hour: u32, min: u32, sec: u32, nano: u32) -> Option { + if hour >= 24 || min >= 60 || sec >= 60 || nano >= 2_000_000_000 { return None; } + let secs = hour * 3600 + min * 60 + sec; + Some(NaiveTime { secs: secs, frac: nano }) + } + + /// Makes a new `NaiveTime` from the number of seconds since midnight and nanosecond. + /// + /// The nanosecond part can exceed 1,000,000,000 + /// in order to represent the [leap second](./index.html#leap-second-handling). + /// + /// Panics on invalid number of seconds and/or nanosecond. + /// + /// # Example + /// + /// ~~~~ + /// use chrono::{NaiveTime, Timelike}; + /// + /// let t = NaiveTime::from_num_seconds_from_midnight(86164, 12_345_678); + /// assert_eq!(t.hour(), 23); + /// assert_eq!(t.minute(), 56); + /// assert_eq!(t.second(), 4); + /// assert_eq!(t.nanosecond(), 12_345_678); + /// ~~~~ + #[inline] + pub fn from_num_seconds_from_midnight(secs: u32, nano: u32) -> NaiveTime { + NaiveTime::from_num_seconds_from_midnight_opt(secs, nano).expect("invalid time") + } + + /// Makes a new `NaiveTime` from the number of seconds since midnight and nanosecond. + /// + /// The nanosecond part can exceed 1,000,000,000 + /// in order to represent the [leap second](./index.html#leap-second-handling). + /// + /// Returns `None` on invalid number of seconds and/or nanosecond. 
+ /// + /// # Example + /// + /// ~~~~ + /// use chrono::NaiveTime; + /// + /// let secs = |secs,nano| NaiveTime::from_num_seconds_from_midnight_opt(secs, nano); + /// assert!(secs(0, 0).is_some()); + /// assert!(secs(86399, 999_999_999).is_some()); + /// assert!(secs(86399, 1_999_999_999).is_some()); // a leap second following 23:59:59 + /// assert!(secs(86400, 0).is_none()); + /// assert!(secs(86399, 2_000_000_000).is_none()); + /// ~~~~ + #[inline] + pub fn from_num_seconds_from_midnight_opt(secs: u32, nano: u32) -> Option { + if secs >= 86400 || nano >= 2_000_000_000 { return None; } + Some(NaiveTime { secs: secs, frac: nano }) + } + + /// Parses a string with the specified format string and returns a new `NaiveTime`. + /// See the [`format::strftime` module](../../format/strftime/index.html) + /// on the supported escape sequences. + /// + /// # Example + /// + /// ~~~~ + /// use chrono::NaiveTime; + /// + /// assert_eq!(NaiveTime::parse_from_str("23:56:04", "%H:%M:%S"), + /// Ok(NaiveTime::from_hms(23, 56, 4))); + /// assert_eq!(NaiveTime::parse_from_str("pm012345.6789", "%p%I%M%S%.f"), + /// Ok(NaiveTime::from_hms_micro(13, 23, 45, 678_900))); + /// ~~~~ + /// + /// Date and offset is ignored for the purpose of parsing. + /// + /// ~~~~ + /// # use chrono::NaiveTime; + /// assert_eq!(NaiveTime::parse_from_str("2014-5-17T12:34:56+09:30", "%Y-%m-%dT%H:%M:%S%z"), + /// Ok(NaiveTime::from_hms(12, 34, 56))); + /// ~~~~ + /// + /// [Leap seconds](./index.html#leap-second-handling) are correctly handled by + /// treating any time of the form `hh:mm:60` as a leap second. + /// (This equally applies to the formatting, so the round trip is possible.) + /// + /// ~~~~ + /// # use chrono::NaiveTime; + /// assert_eq!(NaiveTime::parse_from_str("08:59:60.123", "%H:%M:%S%.f"), + /// Ok(NaiveTime::from_hms_milli(8, 59, 59, 1_123))); + /// ~~~~ + /// + /// Missing seconds are assumed to be zero, + /// but out-of-bound times or insufficient fields are errors otherwise. 
+ /// + /// ~~~~ + /// # use chrono::NaiveTime; + /// assert_eq!(NaiveTime::parse_from_str("7:15", "%H:%M"), + /// Ok(NaiveTime::from_hms(7, 15, 0))); + /// + /// assert!(NaiveTime::parse_from_str("04m33s", "%Mm%Ss").is_err()); + /// assert!(NaiveTime::parse_from_str("12", "%H").is_err()); + /// assert!(NaiveTime::parse_from_str("17:60", "%H:%M").is_err()); + /// assert!(NaiveTime::parse_from_str("24:00:00", "%H:%M:%S").is_err()); + /// ~~~~ + /// + /// All parsed fields should be consistent to each other, otherwise it's an error. + /// Here `%H` is for 24-hour clocks, unlike `%I`, + /// and thus can be independently determined without AM/PM. + /// + /// ~~~~ + /// # use chrono::NaiveTime; + /// assert!(NaiveTime::parse_from_str("13:07 AM", "%H:%M %p").is_err()); + /// ~~~~ + pub fn parse_from_str(s: &str, fmt: &str) -> ParseResult { + let mut parsed = Parsed::new(); + try!(parse(&mut parsed, s, StrftimeItems::new(fmt))); + parsed.to_naive_time() + } + + /// Formats the time with the specified formatting items. + /// Otherwise it is same to the ordinary [`format`](#method.format) method. + /// + /// The `Iterator` of items should be `Clone`able, + /// since the resulting `DelayedFormat` value may be formatted multiple times. + /// + /// # Example + /// + /// ~~~~ + /// use chrono::NaiveTime; + /// use chrono::format::strftime::StrftimeItems; + /// + /// let fmt = StrftimeItems::new("%H:%M:%S"); + /// let t = NaiveTime::from_hms(23, 56, 4); + /// assert_eq!(t.format_with_items(fmt.clone()).to_string(), "23:56:04"); + /// assert_eq!(t.format("%H:%M:%S").to_string(), "23:56:04"); + /// ~~~~ + #[inline] + pub fn format_with_items<'a, I>(&self, items: I) -> DelayedFormat + where I: Iterator> + Clone { + DelayedFormat::new(None, Some(self.clone()), items) + } + + /// Formats the time with the specified format string. + /// See the [`format::strftime` module](../../format/strftime/index.html) + /// on the supported escape sequences. 
+ /// + /// This returns a `DelayedFormat`, + /// which gets converted to a string only when actual formatting happens. + /// You may use the `to_string` method to get a `String`, + /// or just feed it into `print!` and other formatting macros. + /// (In this way it avoids the redundant memory allocation.) + /// + /// A wrong format string does *not* issue an error immediately. + /// Rather, converting or formatting the `DelayedFormat` fails. + /// You are recommended to immediately use `DelayedFormat` for this reason. + /// + /// # Example + /// + /// ~~~~ + /// use chrono::NaiveTime; + /// + /// let t = NaiveTime::from_hms_nano(23, 56, 4, 12_345_678); + /// assert_eq!(t.format("%H:%M:%S").to_string(), "23:56:04"); + /// assert_eq!(t.format("%H:%M:%S%.6f").to_string(), "23:56:04.012345"); + /// assert_eq!(t.format("%-I:%M %p").to_string(), "11:56 PM"); + /// ~~~~ + #[inline] + pub fn format<'a>(&self, fmt: &'a str) -> DelayedFormat> { + self.format_with_items(StrftimeItems::new(fmt)) + } + + /// Returns a triple of the hour, minute and second numbers. + fn hms(&self) -> (u32, u32, u32) { + let (mins, sec) = div_mod_floor(self.secs, 60); + let (hour, min) = div_mod_floor(mins, 60); + (hour, min, sec) + } +} + +impl Timelike for NaiveTime { + /// Returns the hour number from 0 to 23. + /// + /// # Example + /// + /// ~~~~ + /// use chrono::{NaiveTime, Timelike}; + /// + /// assert_eq!(NaiveTime::from_hms(0, 0, 0).hour(), 0); + /// assert_eq!(NaiveTime::from_hms_nano(23, 56, 4, 12_345_678).hour(), 23); + /// ~~~~ + #[inline] + fn hour(&self) -> u32 { + self.hms().0 + } + + /// Returns the minute number from 0 to 59. + /// + /// # Example + /// + /// ~~~~ + /// use chrono::{NaiveTime, Timelike}; + /// + /// assert_eq!(NaiveTime::from_hms(0, 0, 0).minute(), 0); + /// assert_eq!(NaiveTime::from_hms_nano(23, 56, 4, 12_345_678).minute(), 56); + /// ~~~~ + #[inline] + fn minute(&self) -> u32 { + self.hms().1 + } + + /// Returns the second number from 0 to 59. 
+ /// + /// # Example + /// + /// ~~~~ + /// use chrono::{NaiveTime, Timelike}; + /// + /// assert_eq!(NaiveTime::from_hms(0, 0, 0).second(), 0); + /// assert_eq!(NaiveTime::from_hms_nano(23, 56, 4, 12_345_678).second(), 4); + /// ~~~~ + /// + /// This method never returns 60 even when it is a leap second. + /// ([Why?](./index.html#leap-second-handling)) + /// Use the proper [formatting method](#method.format) to get a human-readable representation. + /// + /// ~~~~ + /// # use chrono::{NaiveTime, Timelike}; + /// let leap = NaiveTime::from_hms_milli(23, 59, 59, 1_000); + /// assert_eq!(leap.second(), 59); + /// assert_eq!(leap.format("%H:%M:%S").to_string(), "23:59:60"); + /// ~~~~ + #[inline] + fn second(&self) -> u32 { + self.hms().2 + } + + /// Returns the number of nanoseconds since the whole non-leap second. + /// The range from 1,000,000,000 to 1,999,999,999 represents + /// the [leap second](./naive/time/index.html#leap-second-handling). + /// + /// # Example + /// + /// ~~~~ + /// use chrono::{NaiveTime, Timelike}; + /// + /// assert_eq!(NaiveTime::from_hms(0, 0, 0).nanosecond(), 0); + /// assert_eq!(NaiveTime::from_hms_nano(23, 56, 4, 12_345_678).nanosecond(), 12_345_678); + /// ~~~~ + /// + /// Leap seconds may have seemingly out-of-range return values. + /// You can reduce the range with `time.nanosecond() % 1_000_000_000`, or + /// use the proper [formatting method](#method.format) to get a human-readable representation. + /// + /// ~~~~ + /// # use chrono::{NaiveTime, Timelike}; + /// let leap = NaiveTime::from_hms_milli(23, 59, 59, 1_000); + /// assert_eq!(leap.nanosecond(), 1_000_000_000); + /// assert_eq!(leap.format("%H:%M:%S%.9f").to_string(), "23:59:60.000000000"); + /// ~~~~ + #[inline] + fn nanosecond(&self) -> u32 { + self.frac + } + + /// Makes a new `NaiveTime` with the hour number changed. + /// + /// Returns `None` when the resulting `NaiveTime` would be invalid. 
+ /// + /// # Example + /// + /// ~~~~ + /// use chrono::{NaiveTime, Timelike}; + /// + /// let dt = NaiveTime::from_hms_nano(23, 56, 4, 12_345_678); + /// assert_eq!(dt.with_hour(7), Some(NaiveTime::from_hms_nano(7, 56, 4, 12_345_678))); + /// assert_eq!(dt.with_hour(24), None); + /// ~~~~ + #[inline] + fn with_hour(&self, hour: u32) -> Option { + if hour >= 24 { return None; } + let secs = hour * 3600 + self.secs % 3600; + Some(NaiveTime { secs: secs, ..*self }) + } + + /// Makes a new `NaiveTime` with the minute number changed. + /// + /// Returns `None` when the resulting `NaiveTime` would be invalid. + /// + /// # Example + /// + /// ~~~~ + /// use chrono::{NaiveTime, Timelike}; + /// + /// let dt = NaiveTime::from_hms_nano(23, 56, 4, 12_345_678); + /// assert_eq!(dt.with_minute(45), Some(NaiveTime::from_hms_nano(23, 45, 4, 12_345_678))); + /// assert_eq!(dt.with_minute(60), None); + /// ~~~~ + #[inline] + fn with_minute(&self, min: u32) -> Option { + if min >= 60 { return None; } + let secs = self.secs / 3600 * 3600 + min * 60 + self.secs % 60; + Some(NaiveTime { secs: secs, ..*self }) + } + + /// Makes a new `NaiveTime` with the second number changed. + /// + /// Returns `None` when the resulting `NaiveTime` would be invalid. + /// As with the [`second`](#method.second) method, + /// the input range is restricted to 0 through 59. + /// + /// # Example + /// + /// ~~~~ + /// use chrono::{NaiveTime, Timelike}; + /// + /// let dt = NaiveTime::from_hms_nano(23, 56, 4, 12_345_678); + /// assert_eq!(dt.with_second(17), Some(NaiveTime::from_hms_nano(23, 56, 17, 12_345_678))); + /// assert_eq!(dt.with_second(60), None); + /// ~~~~ + #[inline] + fn with_second(&self, sec: u32) -> Option { + if sec >= 60 { return None; } + let secs = self.secs / 60 * 60 + sec; + Some(NaiveTime { secs: secs, ..*self }) + } + + /// Makes a new `NaiveTime` with nanoseconds since the whole non-leap second changed. + /// + /// Returns `None` when the resulting `NaiveTime` would be invalid. 
+ /// As with the [`nanosecond`](#method.nanosecond) method, + /// the input range can exceed 1,000,000,000 for leap seconds. + /// + /// # Example + /// + /// ~~~~ + /// use chrono::{NaiveTime, Timelike}; + /// + /// let dt = NaiveTime::from_hms_nano(23, 56, 4, 12_345_678); + /// assert_eq!(dt.with_nanosecond(333_333_333), + /// Some(NaiveTime::from_hms_nano(23, 56, 4, 333_333_333))); + /// assert_eq!(dt.with_nanosecond(2_000_000_000), None); + /// ~~~~ + /// + /// Leap seconds can theoretically follow *any* whole second. + /// The following would be a proper leap second at the time zone offset of UTC-00:03:57 + /// (there are several historical examples comparable to this "non-sense" offset), + /// and therefore is allowed. + /// + /// ~~~~ + /// # use chrono::{NaiveTime, Timelike}; + /// # let dt = NaiveTime::from_hms_nano(23, 56, 4, 12_345_678); + /// assert_eq!(dt.with_nanosecond(1_333_333_333), + /// Some(NaiveTime::from_hms_nano(23, 56, 4, 1_333_333_333))); + /// ~~~~ + #[inline] + fn with_nanosecond(&self, nano: u32) -> Option { + if nano >= 2_000_000_000 { return None; } + Some(NaiveTime { frac: nano, ..*self }) + } + + /// Returns the number of non-leap seconds past the last midnight. + /// + /// # Example + /// + /// ~~~~ + /// use chrono::{NaiveTime, Timelike}; + /// + /// assert_eq!(NaiveTime::from_hms(1, 2, 3).num_seconds_from_midnight(), + /// 3723); + /// assert_eq!(NaiveTime::from_hms_nano(23, 56, 4, 12_345_678).num_seconds_from_midnight(), + /// 86164); + /// assert_eq!(NaiveTime::from_hms_milli(23, 59, 59, 1_000).num_seconds_from_midnight(), + /// 86399); + /// ~~~~ + #[inline] + fn num_seconds_from_midnight(&self) -> u32 { + self.secs // do not repeat the calculation! + } +} + +/// `NaiveTime` can be used as a key to the hash maps (in principle). +/// +/// Practically this also takes account of fractional seconds, so it is not recommended. +/// (For the obvious reason this also distinguishes leap seconds from non-leap seconds.) 
+impl hash::Hash for NaiveTime { + fn hash(&self, state: &mut H) { + self.secs.hash(state); + self.frac.hash(state); + } +} + +impl Add for NaiveTime { + type Output = NaiveTime; + + fn add(self, rhs: Duration) -> NaiveTime { + // there is no direct interface in `Duration` to get only the nanosecond part, + // so we need to do the additional calculation here. + let mut rhssecs = rhs.num_seconds(); + let mut rhs2 = rhs - Duration::seconds(rhssecs); + if rhs2 < Duration::zero() { // possible when rhs < 0 + rhssecs -= 1; + rhs2 = rhs2 + Duration::seconds(1); + } + debug_assert!(rhs2 >= Duration::zero()); + let mut secs = self.secs + (rhssecs % 86400 + 86400) as u32; + let mut nanos = self.frac + rhs2.num_nanoseconds().unwrap() as u32; + + // always ignore leap seconds after the current whole second + let maxnanos = if self.frac >= 1_000_000_000 {2_000_000_000} else {1_000_000_000}; + + if nanos >= maxnanos { + nanos -= maxnanos; + secs += 1; + } + NaiveTime { secs: secs % 86400, frac: nanos } + } +} + +impl Sub for NaiveTime { + type Output = Duration; + + fn sub(self, rhs: NaiveTime) -> Duration { + // the number of whole non-leap seconds + let secs = self.secs as i64 - rhs.secs as i64 - 1; + + // the fractional second from the rhs to the next non-leap second + let maxnanos = if rhs.frac >= 1_000_000_000 {2_000_000_000} else {1_000_000_000}; + let nanos1 = maxnanos - rhs.frac; + + // the fractional second from the last leap or non-leap second to the lhs + let lastfrac = if self.frac >= 1_000_000_000 {1_000_000_000} else {0}; + let nanos2 = self.frac - lastfrac; + + Duration::seconds(secs) + Duration::nanoseconds(nanos1 as i64 + nanos2 as i64) + } +} + +impl Sub for NaiveTime { + type Output = NaiveTime; + + #[inline] + fn sub(self, rhs: Duration) -> NaiveTime { self.add(-rhs) } +} + +impl fmt::Debug for NaiveTime { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let (hour, min, sec) = self.hms(); + let (sec, nano) = if self.frac >= 1_000_000_000 { + (sec + 
1, self.frac - 1_000_000_000) + } else { + (sec, self.frac) + }; + + try!(write!(f, "{:02}:{:02}:{:02}", hour, min, sec)); + if nano == 0 { + Ok(()) + } else if nano % 1_000_000 == 0 { + write!(f, ".{:03}", nano / 1_000_000) + } else if nano % 1_000 == 0 { + write!(f, ".{:06}", nano / 1_000) + } else { + write!(f, ".{:09}", nano) + } + } +} + +impl fmt::Display for NaiveTime { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { fmt::Debug::fmt(self, f) } +} + +impl str::FromStr for NaiveTime { + type Err = ParseError; + + fn from_str(s: &str) -> ParseResult { + const ITEMS: &'static [Item<'static>] = &[ + Item::Space(""), Item::Numeric(Numeric::Hour, Pad::Zero), + Item::Space(""), Item::Literal(":"), + Item::Space(""), Item::Numeric(Numeric::Minute, Pad::Zero), + Item::Space(""), Item::Literal(":"), + Item::Space(""), Item::Numeric(Numeric::Second, Pad::Zero), + Item::Fixed(Fixed::Nanosecond), Item::Space(""), + ]; + + let mut parsed = Parsed::new(); + try!(parse(&mut parsed, s, ITEMS.iter().cloned())); + parsed.to_naive_time() + } +} + +#[cfg(feature = "rustc-serialize")] +mod rustc_serialize { + use super::NaiveTime; + use rustc_serialize::{Encodable, Encoder, Decodable, Decoder}; + + // TODO the current serialization format is NEVER intentionally defined. + // this basically follows the automatically generated implementation for those traits, + // plus manual verification steps for avoiding security problem. + // in the future it is likely to be redefined to more sane and reasonable format. 
+ + impl Encodable for NaiveTime { + fn encode(&self, s: &mut S) -> Result<(), S::Error> { + let (secs, frac) = self.to_serialized(); + s.emit_struct("NaiveTime", 2, |s| { + try!(s.emit_struct_field("secs", 0, |s| secs.encode(s))); + try!(s.emit_struct_field("frac", 1, |s| frac.encode(s))); + Ok(()) + }) + } + } + + impl Decodable for NaiveTime { + fn decode(d: &mut D) -> Result { + d.read_struct("NaiveTime", 2, |d| { + let secs = try!(d.read_struct_field("secs", 0, Decodable::decode)); + let frac = try!(d.read_struct_field("frac", 1, Decodable::decode)); + NaiveTime::from_serialized(secs, frac).ok_or_else(|| d.error("invalid time")) + }) + } + } + + #[test] + fn test_encodable() { + use rustc_serialize::json::encode; + + assert_eq!(encode(&NaiveTime::from_hms(0, 0, 0)).ok(), + Some(r#"{"secs":0,"frac":0}"#.into())); + assert_eq!(encode(&NaiveTime::from_hms_milli(0, 0, 0, 950)).ok(), + Some(r#"{"secs":0,"frac":950000000}"#.into())); + assert_eq!(encode(&NaiveTime::from_hms_milli(0, 0, 59, 1_000)).ok(), + Some(r#"{"secs":59,"frac":1000000000}"#.into())); + assert_eq!(encode(&NaiveTime::from_hms(0, 1, 2)).ok(), + Some(r#"{"secs":62,"frac":0}"#.into())); + assert_eq!(encode(&NaiveTime::from_hms(7, 8, 9)).ok(), + Some(r#"{"secs":25689,"frac":0}"#.into())); + assert_eq!(encode(&NaiveTime::from_hms_micro(12, 34, 56, 789)).ok(), + Some(r#"{"secs":45296,"frac":789000}"#.into())); + assert_eq!(encode(&NaiveTime::from_hms_nano(23, 59, 59, 1_999_999_999)).ok(), + Some(r#"{"secs":86399,"frac":1999999999}"#.into())); + } + + #[test] + fn test_decodable() { + use rustc_serialize::json; + + let decode = |s: &str| json::decode::(s); + + assert_eq!(decode(r#"{"secs":0,"frac":0}"#).ok(), + Some(NaiveTime::from_hms(0, 0, 0))); + assert_eq!(decode(r#"{"frac":950000000,"secs":0}"#).ok(), + Some(NaiveTime::from_hms_milli(0, 0, 0, 950))); + assert_eq!(decode(r#"{"secs":59,"frac":1000000000}"#).ok(), + Some(NaiveTime::from_hms_milli(0, 0, 59, 1_000))); + assert_eq!(decode(r#"{"frac": 0, + 
"secs": 62}"#).ok(), + Some(NaiveTime::from_hms(0, 1, 2))); + assert_eq!(decode(r#"{"secs":25689,"frac":0}"#).ok(), + Some(NaiveTime::from_hms(7, 8, 9))); + assert_eq!(decode(r#"{"secs":45296,"frac":789000}"#).ok(), + Some(NaiveTime::from_hms_micro(12, 34, 56, 789))); + assert_eq!(decode(r#"{"secs":86399,"frac":1999999999}"#).ok(), + Some(NaiveTime::from_hms_nano(23, 59, 59, 1_999_999_999))); + + // bad formats + assert!(decode(r#"{"secs":0,"frac":-1}"#).is_err()); + assert!(decode(r#"{"secs":-1,"frac":0}"#).is_err()); + assert!(decode(r#"{"secs":86400,"frac":0}"#).is_err()); + assert!(decode(r#"{"secs":0,"frac":2000000000}"#).is_err()); + assert!(decode(r#"{"secs":0}"#).is_err()); + assert!(decode(r#"{"frac":0}"#).is_err()); + assert!(decode(r#"{"secs":0.3,"frac":0}"#).is_err()); + assert!(decode(r#"{"secs":0,"frac":0.4}"#).is_err()); + assert!(decode(r#"{}"#).is_err()); + assert!(decode(r#"0"#).is_err()); + assert!(decode(r#"86399"#).is_err()); + assert!(decode(r#""string""#).is_err()); + assert!(decode(r#""12:34:56""#).is_err()); // :( + assert!(decode(r#""12:34:56.789""#).is_err()); // :( + assert!(decode(r#"null"#).is_err()); + } +} + +#[cfg(feature = "serde")] +mod serde { + use super::NaiveTime; + use serde::{ser, de}; + + // TODO not very optimized for space (binary formats would want something better) + // TODO round-trip for general leap seconds (not just those with second = 60) + + impl ser::Serialize for NaiveTime { + fn serialize(&self, serializer: &mut S) -> Result<(), S::Error> + where S: ser::Serializer + { + serializer.serialize_str(&format!("{:?}", self)) + } + } + + struct NaiveTimeVisitor; + + impl de::Visitor for NaiveTimeVisitor { + type Value = NaiveTime; + + fn visit_str(&mut self, value: &str) -> Result + where E: de::Error + { + value.parse().map_err(|err| E::custom(format!("{}", err))) + } + } + + impl de::Deserialize for NaiveTime { + fn deserialize(deserializer: &mut D) -> Result + where D: de::Deserializer + { + 
deserializer.deserialize(NaiveTimeVisitor) + } + } + + #[cfg(test)] extern crate serde_json; + + #[test] + fn test_serde_serialize() { + use self::serde_json::to_string; + + assert_eq!(to_string(&NaiveTime::from_hms(0, 0, 0)).ok(), + Some(r#""00:00:00""#.into())); + assert_eq!(to_string(&NaiveTime::from_hms_milli(0, 0, 0, 950)).ok(), + Some(r#""00:00:00.950""#.into())); + assert_eq!(to_string(&NaiveTime::from_hms_milli(0, 0, 59, 1_000)).ok(), + Some(r#""00:00:60""#.into())); + assert_eq!(to_string(&NaiveTime::from_hms(0, 1, 2)).ok(), + Some(r#""00:01:02""#.into())); + assert_eq!(to_string(&NaiveTime::from_hms_nano(3, 5, 7, 98765432)).ok(), + Some(r#""03:05:07.098765432""#.into())); + assert_eq!(to_string(&NaiveTime::from_hms(7, 8, 9)).ok(), + Some(r#""07:08:09""#.into())); + assert_eq!(to_string(&NaiveTime::from_hms_micro(12, 34, 56, 789)).ok(), + Some(r#""12:34:56.000789""#.into())); + assert_eq!(to_string(&NaiveTime::from_hms_nano(23, 59, 59, 1_999_999_999)).ok(), + Some(r#""23:59:60.999999999""#.into())); + } + + #[test] + fn test_serde_deserialize() { + use self::serde_json::from_str; + + let from_str = |s: &str| serde_json::from_str::(s); + + assert_eq!(from_str(r#""00:00:00""#).ok(), + Some(NaiveTime::from_hms(0, 0, 0))); + assert_eq!(from_str(r#""0:0:0""#).ok(), + Some(NaiveTime::from_hms(0, 0, 0))); + assert_eq!(from_str(r#""00:00:00.950""#).ok(), + Some(NaiveTime::from_hms_milli(0, 0, 0, 950))); + assert_eq!(from_str(r#""0:0:0.95""#).ok(), + Some(NaiveTime::from_hms_milli(0, 0, 0, 950))); + assert_eq!(from_str(r#""00:00:60""#).ok(), + Some(NaiveTime::from_hms_milli(0, 0, 59, 1_000))); + assert_eq!(from_str(r#""00:01:02""#).ok(), + Some(NaiveTime::from_hms(0, 1, 2))); + assert_eq!(from_str(r#""03:05:07.098765432""#).ok(), + Some(NaiveTime::from_hms_nano(3, 5, 7, 98765432))); + assert_eq!(from_str(r#""07:08:09""#).ok(), + Some(NaiveTime::from_hms(7, 8, 9))); + assert_eq!(from_str(r#""12:34:56.000789""#).ok(), + Some(NaiveTime::from_hms_micro(12, 34, 56, 
789))); + assert_eq!(from_str(r#""23:59:60.999999999""#).ok(), + Some(NaiveTime::from_hms_nano(23, 59, 59, 1_999_999_999))); + assert_eq!(from_str(r#""23:59:60.9999999999997""#).ok(), // excess digits are ignored + Some(NaiveTime::from_hms_nano(23, 59, 59, 1_999_999_999))); + + // bad formats + assert!(from_str(r#""""#).is_err()); + assert!(from_str(r#""000000""#).is_err()); + assert!(from_str(r#""00:00:61""#).is_err()); + assert!(from_str(r#""00:60:00""#).is_err()); + assert!(from_str(r#""24:00:00""#).is_err()); + assert!(from_str(r#""23:59:59,1""#).is_err()); + assert!(from_str(r#""012:34:56""#).is_err()); + assert!(from_str(r#""hh:mm:ss""#).is_err()); + assert!(from_str(r#"0"#).is_err()); + assert!(from_str(r#"86399"#).is_err()); + assert!(from_str(r#"{}"#).is_err()); + assert!(from_str(r#"{"secs":0,"frac":0}"#).is_err()); // :( + assert!(from_str(r#"null"#).is_err()); + } +} + +#[cfg(test)] +mod tests { + use super::NaiveTime; + use Timelike; + use duration::Duration; + use std::u32; + + #[test] + fn test_time_from_hms_milli() { + assert_eq!(NaiveTime::from_hms_milli_opt(3, 5, 7, 0), + Some(NaiveTime::from_hms_nano(3, 5, 7, 0))); + assert_eq!(NaiveTime::from_hms_milli_opt(3, 5, 7, 777), + Some(NaiveTime::from_hms_nano(3, 5, 7, 777_000_000))); + assert_eq!(NaiveTime::from_hms_milli_opt(3, 5, 7, 1_999), + Some(NaiveTime::from_hms_nano(3, 5, 7, 1_999_000_000))); + assert_eq!(NaiveTime::from_hms_milli_opt(3, 5, 7, 2_000), None); + assert_eq!(NaiveTime::from_hms_milli_opt(3, 5, 7, 5_000), None); // overflow check + assert_eq!(NaiveTime::from_hms_milli_opt(3, 5, 7, u32::MAX), None); + } + + #[test] + fn test_time_from_hms_micro() { + assert_eq!(NaiveTime::from_hms_micro_opt(3, 5, 7, 0), + Some(NaiveTime::from_hms_nano(3, 5, 7, 0))); + assert_eq!(NaiveTime::from_hms_micro_opt(3, 5, 7, 333), + Some(NaiveTime::from_hms_nano(3, 5, 7, 333_000))); + assert_eq!(NaiveTime::from_hms_micro_opt(3, 5, 7, 777_777), + Some(NaiveTime::from_hms_nano(3, 5, 7, 777_777_000))); + 
assert_eq!(NaiveTime::from_hms_micro_opt(3, 5, 7, 1_999_999), + Some(NaiveTime::from_hms_nano(3, 5, 7, 1_999_999_000))); + assert_eq!(NaiveTime::from_hms_micro_opt(3, 5, 7, 2_000_000), None); + assert_eq!(NaiveTime::from_hms_micro_opt(3, 5, 7, 5_000_000), None); // overflow check + assert_eq!(NaiveTime::from_hms_micro_opt(3, 5, 7, u32::MAX), None); + } + + #[test] + fn test_time_hms() { + assert_eq!(NaiveTime::from_hms(3, 5, 7).hour(), 3); + assert_eq!(NaiveTime::from_hms(3, 5, 7).with_hour(0), + Some(NaiveTime::from_hms(0, 5, 7))); + assert_eq!(NaiveTime::from_hms(3, 5, 7).with_hour(23), + Some(NaiveTime::from_hms(23, 5, 7))); + assert_eq!(NaiveTime::from_hms(3, 5, 7).with_hour(24), None); + assert_eq!(NaiveTime::from_hms(3, 5, 7).with_hour(u32::MAX), None); + + assert_eq!(NaiveTime::from_hms(3, 5, 7).minute(), 5); + assert_eq!(NaiveTime::from_hms(3, 5, 7).with_minute(0), + Some(NaiveTime::from_hms(3, 0, 7))); + assert_eq!(NaiveTime::from_hms(3, 5, 7).with_minute(59), + Some(NaiveTime::from_hms(3, 59, 7))); + assert_eq!(NaiveTime::from_hms(3, 5, 7).with_minute(60), None); + assert_eq!(NaiveTime::from_hms(3, 5, 7).with_minute(u32::MAX), None); + + assert_eq!(NaiveTime::from_hms(3, 5, 7).second(), 7); + assert_eq!(NaiveTime::from_hms(3, 5, 7).with_second(0), + Some(NaiveTime::from_hms(3, 5, 0))); + assert_eq!(NaiveTime::from_hms(3, 5, 7).with_second(59), + Some(NaiveTime::from_hms(3, 5, 59))); + assert_eq!(NaiveTime::from_hms(3, 5, 7).with_second(60), None); + assert_eq!(NaiveTime::from_hms(3, 5, 7).with_second(u32::MAX), None); + } + + #[test] + fn test_time_add() { + fn check(lhs: NaiveTime, rhs: Duration, sum: NaiveTime) { + assert_eq!(lhs + rhs, sum); + //assert_eq!(rhs + lhs, sum); + } + + let hmsm = |h,m,s,mi| NaiveTime::from_hms_milli(h, m, s, mi); + + check(hmsm(3, 5, 7, 900), Duration::zero(), hmsm(3, 5, 7, 900)); + check(hmsm(3, 5, 7, 900), Duration::milliseconds(100), hmsm(3, 5, 8, 0)); + check(hmsm(3, 5, 7, 1_300), Duration::milliseconds(800), hmsm(3, 5, 
8, 100)); + check(hmsm(3, 5, 7, 900), Duration::seconds(86399), hmsm(3, 5, 6, 900)); // overwrap + check(hmsm(3, 5, 7, 900), Duration::seconds(-86399), hmsm(3, 5, 8, 900)); + check(hmsm(3, 5, 7, 900), Duration::days(12345), hmsm(3, 5, 7, 900)); + + // regression tests for #37 + check(hmsm(0, 0, 0, 0), Duration::milliseconds(-990), hmsm(23, 59, 59, 10)); + check(hmsm(0, 0, 0, 0), Duration::milliseconds(-9990), hmsm(23, 59, 50, 10)); + } + + #[test] + fn test_time_sub() { + fn check(lhs: NaiveTime, rhs: NaiveTime, diff: Duration) { + // `time1 - time2 = duration` is equivalent to `time2 - time1 = -duration` + assert_eq!(lhs - rhs, diff); + assert_eq!(rhs - lhs, -diff); + } + + let hmsm = |h,m,s,mi| NaiveTime::from_hms_milli(h, m, s, mi); + + check(hmsm(3, 5, 7, 900), hmsm(3, 5, 7, 900), Duration::zero()); + check(hmsm(3, 5, 7, 900), hmsm(3, 5, 7, 600), Duration::milliseconds(300)); + check(hmsm(3, 5, 7, 200), hmsm(2, 4, 6, 200), Duration::seconds(3600 + 60 + 1)); + check(hmsm(3, 5, 7, 200), hmsm(2, 4, 6, 300), + Duration::seconds(3600 + 60) + Duration::milliseconds(900)); + + // treats the leap second as if it coincides with the prior non-leap second, + // as required by `time1 - time2 = duration` and `time2 - time1 = -duration` equivalence. + check(hmsm(3, 5, 7, 200), hmsm(3, 5, 6, 1_800), Duration::milliseconds(400)); + check(hmsm(3, 5, 7, 1_200), hmsm(3, 5, 6, 1_800), Duration::milliseconds(400)); + check(hmsm(3, 5, 7, 1_200), hmsm(3, 5, 6, 800), Duration::milliseconds(400)); + + // additional equality: `time1 + duration = time2` is equivalent to + // `time2 - time1 = duration` IF AND ONLY IF `time2` represents a non-leap second. 
+ assert_eq!(hmsm(3, 5, 6, 800) + Duration::milliseconds(400), hmsm(3, 5, 7, 200)); + assert_eq!(hmsm(3, 5, 6, 1_800) + Duration::milliseconds(400), hmsm(3, 5, 7, 200)); + } + + #[test] + fn test_time_fmt() { + assert_eq!(format!("{}", NaiveTime::from_hms_milli(23, 59, 59, 999)), "23:59:59.999"); + assert_eq!(format!("{}", NaiveTime::from_hms_milli(23, 59, 59, 1_000)), "23:59:60"); + assert_eq!(format!("{}", NaiveTime::from_hms_milli(23, 59, 59, 1_001)), "23:59:60.001"); + assert_eq!(format!("{}", NaiveTime::from_hms_micro(0, 0, 0, 43210)), "00:00:00.043210"); + assert_eq!(format!("{}", NaiveTime::from_hms_nano(0, 0, 0, 6543210)), "00:00:00.006543210"); + + // the format specifier should have no effect on `NaiveTime` + assert_eq!(format!("{:30}", NaiveTime::from_hms_milli(3, 5, 7, 9)), "03:05:07.009"); + } + + #[test] + fn test_date_from_str() { + // valid cases + let valid = [ + "0:0:0", + "0:0:0.0000000", + "0:0:0.0000003", + " 4 : 3 : 2.1 ", + " 09:08:07 ", + " 9:8:07 ", + "23:59:60.373929310237", + ]; + for &s in &valid { + let d = match s.parse::() { + Ok(d) => d, + Err(e) => panic!("parsing `{}` has failed: {}", s, e) + }; + let s_ = format!("{:?}", d); + // `s` and `s_` may differ, but `s.parse()` and `s_.parse()` must be same + let d_ = match s_.parse::() { + Ok(d) => d, + Err(e) => panic!("`{}` is parsed into `{:?}`, but reparsing that has failed: {}", + s, d, e) + }; + assert!(d == d_, "`{}` is parsed into `{:?}`, but reparsed result \ + `{:?}` does not match", s, d, d_); + } + + // some invalid cases + // since `ParseErrorKind` is private, all we can do is to check if there was an error + assert!("".parse::().is_err()); + assert!("x".parse::().is_err()); + assert!("15".parse::().is_err()); + assert!("15:8".parse::().is_err()); + assert!("15:8:x".parse::().is_err()); + assert!("15:8:9x".parse::().is_err()); + assert!("23:59:61".parse::().is_err()); + assert!("12:34:56.x".parse::().is_err()); + assert!("12:34:56. 
0".parse::().is_err()); + } + + #[test] + fn test_time_parse_from_str() { + let hms = |h,m,s| NaiveTime::from_hms(h,m,s); + assert_eq!(NaiveTime::parse_from_str("2014-5-7T12:34:56+09:30", "%Y-%m-%dT%H:%M:%S%z"), + Ok(hms(12, 34, 56))); // ignore date and offset + assert_eq!(NaiveTime::parse_from_str("PM 12:59", "%P %H:%M"), + Ok(hms(12, 59, 0))); + assert!(NaiveTime::parse_from_str("12:3456", "%H:%M:%S").is_err()); + } + + #[test] + fn test_time_format() { + let t = NaiveTime::from_hms_nano(3, 5, 7, 98765432); + assert_eq!(t.format("%H,%k,%I,%l,%P,%p").to_string(), "03, 3,03, 3,am,AM"); + assert_eq!(t.format("%M").to_string(), "05"); + assert_eq!(t.format("%S,%f,%.f").to_string(), "07,098765432,.098765432"); + assert_eq!(t.format("%.3f,%.6f,%.9f").to_string(), ".098,.098765,.098765432"); + assert_eq!(t.format("%R").to_string(), "03:05"); + assert_eq!(t.format("%T,%X").to_string(), "03:05:07,03:05:07"); + assert_eq!(t.format("%r").to_string(), "03:05:07 AM"); + assert_eq!(t.format("%t%n%%%n%t").to_string(), "\t\n%\n\t"); + + let t = NaiveTime::from_hms_micro(3, 5, 7, 432100); + assert_eq!(t.format("%S,%f,%.f").to_string(), "07,432100000,.432100"); + assert_eq!(t.format("%.3f,%.6f,%.9f").to_string(), ".432,.432100,.432100000"); + + let t = NaiveTime::from_hms_milli(3, 5, 7, 210); + assert_eq!(t.format("%S,%f,%.f").to_string(), "07,210000000,.210"); + assert_eq!(t.format("%.3f,%.6f,%.9f").to_string(), ".210,.210000,.210000000"); + + let t = NaiveTime::from_hms(3, 5, 7); + assert_eq!(t.format("%S,%f,%.f").to_string(), "07,000000000,"); + assert_eq!(t.format("%.3f,%.6f,%.9f").to_string(), ".000,.000000,.000000000"); + + // corner cases + assert_eq!(NaiveTime::from_hms(13, 57, 9).format("%r").to_string(), "01:57:09 PM"); + assert_eq!(NaiveTime::from_hms_milli(23, 59, 59, 1_000).format("%X").to_string(), + "23:59:60"); + } +} + diff -Nru cargo-0.17.0/vendor/chrono-0.2.25/src/offset/fixed.rs cargo-0.19.0/vendor/chrono-0.2.25/src/offset/fixed.rs --- 
cargo-0.17.0/vendor/chrono-0.2.25/src/offset/fixed.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/chrono-0.2.25/src/offset/fixed.rs 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,188 @@ +// This is a part of rust-chrono. +// Copyright (c) 2015, Kang Seonghoon. +// See README.md and LICENSE.txt for details. + +/*! + * The time zone which has a fixed offset from UTC. + */ + +use std::fmt; + +use div::div_mod_floor; +use duration::Duration; +use naive::date::NaiveDate; +use naive::datetime::NaiveDateTime; +use super::{TimeZone, Offset, LocalResult}; + +/// The time zone with fixed offset, from UTC-23:59:59 to UTC+23:59:59. +#[derive(Copy, Clone, PartialEq, Eq)] +pub struct FixedOffset { + local_minus_utc: i32, +} + +impl FixedOffset { + /// Makes a new `FixedOffset` from the serialized representation. + /// Used for serialization formats. + #[cfg(feature = "rustc-serialize")] + fn from_serialized(secs: i32) -> Option { + // check if the values are in the range + if secs <= -86400 || 86400 <= secs { return None; } + + let offset = FixedOffset { local_minus_utc: secs }; + Some(offset) + } + + /// Returns a serialized representation of this `FixedOffset`. + #[cfg(feature = "rustc-serialize")] + fn to_serialized(&self) -> i32 { + self.local_minus_utc + } + + /// Makes a new `FixedOffset` for the Eastern Hemisphere with given timezone difference. + /// The negative `secs` means the Western Hemisphere. + /// + /// Panics on the out-of-bound `secs`. + pub fn east(secs: i32) -> FixedOffset { + FixedOffset::east_opt(secs).expect("FixedOffset::east out of bounds") + } + + /// Makes a new `FixedOffset` for the Eastern Hemisphere with given timezone difference. + /// The negative `secs` means the Western Hemisphere. + /// + /// Returns `None` on the out-of-bound `secs`. 
+ pub fn east_opt(secs: i32) -> Option { + if -86400 < secs && secs < 86400 { + Some(FixedOffset { local_minus_utc: secs }) + } else { + None + } + } + + /// Makes a new `FixedOffset` for the Western Hemisphere with given timezone difference. + /// The negative `secs` means the Eastern Hemisphere. + /// + /// Panics on the out-of-bound `secs`. + pub fn west(secs: i32) -> FixedOffset { + FixedOffset::west_opt(secs).expect("FixedOffset::west out of bounds") + } + + /// Makes a new `FixedOffset` for the Western Hemisphere with given timezone difference. + /// The negative `secs` means the Eastern Hemisphere. + /// + /// Returns `None` on the out-of-bound `secs`. + pub fn west_opt(secs: i32) -> Option { + if -86400 < secs && secs < 86400 { + Some(FixedOffset { local_minus_utc: -secs }) + } else { + None + } + } +} + +impl TimeZone for FixedOffset { + type Offset = FixedOffset; + + fn from_offset(offset: &FixedOffset) -> FixedOffset { offset.clone() } + + fn offset_from_local_date(&self, _local: &NaiveDate) -> LocalResult { + LocalResult::Single(self.clone()) + } + fn offset_from_local_datetime(&self, _local: &NaiveDateTime) -> LocalResult { + LocalResult::Single(self.clone()) + } + + fn offset_from_utc_date(&self, _utc: &NaiveDate) -> FixedOffset { self.clone() } + fn offset_from_utc_datetime(&self, _utc: &NaiveDateTime) -> FixedOffset { self.clone() } +} + +impl Offset for FixedOffset { + fn local_minus_utc(&self) -> Duration { Duration::seconds(self.local_minus_utc as i64) } +} + +impl fmt::Debug for FixedOffset { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let offset = self.local_minus_utc; + let (sign, offset) = if offset < 0 {('-', -offset)} else {('+', offset)}; + let (mins, sec) = div_mod_floor(offset, 60); + let (hour, min) = div_mod_floor(mins, 60); + if sec == 0 { + write!(f, "{}{:02}:{:02}", sign, hour, min) + } else { + write!(f, "{}{:02}:{:02}:{:02}", sign, hour, min, sec) + } + } +} + +impl fmt::Display for FixedOffset { + fn fmt(&self, f: 
&mut fmt::Formatter) -> fmt::Result { fmt::Debug::fmt(self, f) } +} + +#[cfg(feature = "rustc-serialize")] +mod rustc_serialize { + use super::FixedOffset; + use rustc_serialize::{Encodable, Encoder, Decodable, Decoder}; + + // TODO the current serialization format is NEVER intentionally defined. + // this basically follows the automatically generated implementation for those traits, + // plus manual verification steps for avoiding security problem. + // in the future it is likely to be redefined to more sane and reasonable format. + + impl Encodable for FixedOffset { + fn encode(&self, s: &mut S) -> Result<(), S::Error> { + let secs = self.to_serialized(); + s.emit_struct("FixedOffset", 1, |s| { + try!(s.emit_struct_field("local_minus_utc", 0, |s| secs.encode(s))); + Ok(()) + }) + } + } + + impl Decodable for FixedOffset { + fn decode(d: &mut D) -> Result { + d.read_struct("FixedOffset", 1, |d| { + let secs = try!(d.read_struct_field("local_minus_utc", 0, Decodable::decode)); + FixedOffset::from_serialized(secs).ok_or_else(|| d.error("invalid offset")) + }) + } + } + + #[test] + fn test_encodable() { + use rustc_serialize::json::encode; + + assert_eq!(encode(&FixedOffset::east(0)).ok(), + Some(r#"{"local_minus_utc":0}"#.into())); + assert_eq!(encode(&FixedOffset::east(1234)).ok(), + Some(r#"{"local_minus_utc":1234}"#.into())); + assert_eq!(encode(&FixedOffset::east(86399)).ok(), + Some(r#"{"local_minus_utc":86399}"#.into())); + assert_eq!(encode(&FixedOffset::west(1234)).ok(), + Some(r#"{"local_minus_utc":-1234}"#.into())); + assert_eq!(encode(&FixedOffset::west(86399)).ok(), + Some(r#"{"local_minus_utc":-86399}"#.into())); + } + + #[test] + fn test_decodable() { + use rustc_serialize::json; + + let decode = |s: &str| json::decode::(s); + + assert_eq!(decode(r#"{"local_minus_utc":0}"#).ok(), Some(FixedOffset::east(0))); + assert_eq!(decode(r#"{"local_minus_utc": 1234}"#).ok(), Some(FixedOffset::east(1234))); + 
assert_eq!(decode(r#"{"local_minus_utc":86399}"#).ok(), Some(FixedOffset::east(86399))); + assert_eq!(decode(r#"{"local_minus_utc":-1234}"#).ok(), Some(FixedOffset::west(1234))); + assert_eq!(decode(r#"{"local_minus_utc":-86399}"#).ok(), Some(FixedOffset::west(86399))); + + assert!(decode(r#"{"local_minus_utc":86400}"#).is_err()); + assert!(decode(r#"{"local_minus_utc":-86400}"#).is_err()); + assert!(decode(r#"{"local_minus_utc":0.1}"#).is_err()); + assert!(decode(r#"{"local_minus_utc":null}"#).is_err()); + assert!(decode(r#"{}"#).is_err()); + assert!(decode(r#"0"#).is_err()); + assert!(decode(r#"1234"#).is_err()); + assert!(decode(r#""string""#).is_err()); + assert!(decode(r#"null"#).is_err()); + } +} + diff -Nru cargo-0.17.0/vendor/chrono-0.2.25/src/offset/local.rs cargo-0.19.0/vendor/chrono-0.2.25/src/offset/local.rs --- cargo-0.17.0/vendor/chrono-0.2.25/src/offset/local.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/chrono-0.2.25/src/offset/local.rs 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,130 @@ +// This is a part of rust-chrono. +// Copyright (c) 2015, Kang Seonghoon. +// See README.md and LICENSE.txt for details. + +/*! + * The local (system) time zone. + */ + +use stdtime; + +use {Datelike, Timelike}; +use duration::Duration; +use naive::date::NaiveDate; +use naive::time::NaiveTime; +use naive::datetime::NaiveDateTime; +use date::Date; +use datetime::DateTime; +use super::{TimeZone, LocalResult}; +use super::fixed::FixedOffset; + +/// Converts a `time::Tm` struct into the timezone-aware `DateTime`. +/// This assumes that `time` is working correctly, i.e. any error is fatal. +fn tm_to_datetime(mut tm: stdtime::Tm) -> DateTime { + if tm.tm_sec >= 60 { + tm.tm_nsec += (tm.tm_sec - 59) * 1_000_000_000; + tm.tm_sec = 59; + } + + #[cfg(not(windows))] + fn tm_to_naive_date(tm: &stdtime::Tm) -> NaiveDate { + // from_yo is more efficient than from_ymd (since it's the internal representation). 
+ NaiveDate::from_yo(tm.tm_year + 1900, tm.tm_yday as u32 + 1) + } + + #[cfg(windows)] + fn tm_to_naive_date(tm: &stdtime::Tm) -> NaiveDate { + // ...but tm_yday is broken in Windows (issue #85) + NaiveDate::from_ymd(tm.tm_year + 1900, tm.tm_mon as u32 + 1, tm.tm_mday as u32) + } + + let date = tm_to_naive_date(&tm); + let time = NaiveTime::from_hms_nano(tm.tm_hour as u32, tm.tm_min as u32, + tm.tm_sec as u32, tm.tm_nsec as u32); + let offset = FixedOffset::east(tm.tm_utcoff); + DateTime::from_utc(date.and_time(time) + Duration::seconds(-tm.tm_utcoff as i64), offset) +} + +/// Converts a local `NaiveDateTime` to the `time::Timespec`. +fn datetime_to_timespec(d: &NaiveDateTime, local: bool) -> stdtime::Timespec { + // well, this exploits an undocumented `Tm::to_timespec` behavior + // to get the exact function we want (either `timegm` or `mktime`). + // the number 1 is arbitrary but should be non-zero to trigger `mktime`. + let tm_utcoff = if local {1} else {0}; + + let tm = stdtime::Tm { + tm_sec: d.second() as i32, + tm_min: d.minute() as i32, + tm_hour: d.hour() as i32, + tm_mday: d.day() as i32, + tm_mon: d.month0() as i32, // yes, C is that strange... + tm_year: d.year() - 1900, // this doesn't underflow, we know that d is `NaiveDateTime`. + tm_wday: 0, // to_local ignores this + tm_yday: 0, // and this + tm_isdst: -1, + tm_utcoff: tm_utcoff, + tm_nsec: d.nanosecond() as i32, + }; + tm.to_timespec() +} + +/// The local timescale. This is implemented via the standard `time` crate. +#[derive(Copy, Clone)] +#[cfg_attr(feature = "rustc-serialize", derive(RustcEncodable, RustcDecodable))] +pub struct Local; + +impl Local { + /// Returns a `Date` which corresponds to the current date. + pub fn today() -> Date { + Local::now().date() + } + + /// Returns a `DateTime` which corresponds to the current date. 
+ pub fn now() -> DateTime { + tm_to_datetime(stdtime::now()) + } +} + +impl TimeZone for Local { + type Offset = FixedOffset; + + fn from_offset(_offset: &FixedOffset) -> Local { Local } + + // they are easier to define in terms of the finished date and time unlike other offsets + fn offset_from_local_date(&self, local: &NaiveDate) -> LocalResult { + self.from_local_date(local).map(|date| *date.offset()) + } + fn offset_from_local_datetime(&self, local: &NaiveDateTime) -> LocalResult { + self.from_local_datetime(local).map(|datetime| *datetime.offset()) + } + + fn offset_from_utc_date(&self, utc: &NaiveDate) -> FixedOffset { + *self.from_utc_date(utc).offset() + } + fn offset_from_utc_datetime(&self, utc: &NaiveDateTime) -> FixedOffset { + *self.from_utc_datetime(utc).offset() + } + + // override them for avoiding redundant works + fn from_local_date(&self, local: &NaiveDate) -> LocalResult> { + // this sounds very strange, but required for keeping `TimeZone::ymd` sane. + // in the other words, we use the offset at the local midnight + // but keep the actual date unaltered (much like `FixedOffset`). 
+ let midnight = self.from_local_datetime(&local.and_hms(0, 0, 0)); + midnight.map(|datetime| Date::from_utc(*local, datetime.offset().clone())) + } + fn from_local_datetime(&self, local: &NaiveDateTime) -> LocalResult> { + let timespec = datetime_to_timespec(local, true); + LocalResult::Single(tm_to_datetime(stdtime::at(timespec))) + } + + fn from_utc_date(&self, utc: &NaiveDate) -> Date { + let midnight = self.from_utc_datetime(&utc.and_hms(0, 0, 0)); + Date::from_utc(*utc, midnight.offset().clone()) + } + fn from_utc_datetime(&self, utc: &NaiveDateTime) -> DateTime { + let timespec = datetime_to_timespec(utc, false); + tm_to_datetime(stdtime::at(timespec)) + } +} + diff -Nru cargo-0.17.0/vendor/chrono-0.2.25/src/offset/mod.rs cargo-0.19.0/vendor/chrono-0.2.25/src/offset/mod.rs --- cargo-0.17.0/vendor/chrono-0.2.25/src/offset/mod.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/chrono-0.2.25/src/offset/mod.rs 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,334 @@ +// This is a part of rust-chrono. +// Copyright (c) 2014-2015, Kang Seonghoon. +// See README.md and LICENSE.txt for details. + +/*! + * The time zone, which calculates offsets from the local time to UTC. + * + * There are three operations provided by the `TimeZone` trait: + * + * 1. Converting the local `NaiveDateTime` to `DateTime` + * 2. Converting the UTC `NaiveDateTime` to `DateTime` + * 3. Converting `DateTime` to the local `NaiveDateTime` + * + * 1 is used for constructors. 2 is used for the `with_timezone` method of date and time types. + * 3 is used for other methods, e.g. `year()` or `format()`, and provided by an associated type + * which implements `Offset` (which then passed to `TimeZone` for actual implementations). + * Technically speaking `TimeZone` has a total knowledge about given timescale, + * but `Offset` is used as a cache to avoid the repeated conversion + * and provides implementations for 1 and 3. 
+ * An `TimeZone` instance can be reconstructed from the corresponding `Offset` instance. + */ + +use std::fmt; + +use Weekday; +use duration::Duration; +use naive::date::NaiveDate; +use naive::time::NaiveTime; +use naive::datetime::NaiveDateTime; +use date::Date; +use datetime::DateTime; +use format::{parse, Parsed, ParseResult, StrftimeItems}; + +/// The conversion result from the local time to the timezone-aware datetime types. +#[derive(Clone, PartialEq, Debug)] +pub enum LocalResult { + /// Given local time representation is invalid. + /// This can occur when, for example, the positive timezone transition. + None, + /// Given local time representation has a single unique result. + Single(T), + /// Given local time representation has multiple results and thus ambiguous. + /// This can occur when, for example, the negative timezone transition. + Ambiguous(T /*min*/, T /*max*/), +} + +impl LocalResult { + /// Returns `Some` only when the conversion result is unique, or `None` otherwise. + pub fn single(self) -> Option { + match self { LocalResult::Single(t) => Some(t), _ => None } + } + + /// Returns `Some` for the earliest possible conversion result, or `None` if none. + pub fn earliest(self) -> Option { + match self { LocalResult::Single(t) | LocalResult::Ambiguous(t,_) => Some(t), _ => None } + } + + /// Returns `Some` for the latest possible conversion result, or `None` if none. + pub fn latest(self) -> Option { + match self { LocalResult::Single(t) | LocalResult::Ambiguous(_,t) => Some(t), _ => None } + } + + /// Maps a `LocalResult` into `LocalResult` with given function. + pub fn map U>(self, mut f: F) -> LocalResult { + match self { + LocalResult::None => LocalResult::None, + LocalResult::Single(v) => LocalResult::Single(f(v)), + LocalResult::Ambiguous(min, max) => LocalResult::Ambiguous(f(min), f(max)), + } + } +} + +impl LocalResult> { + /// Makes a new `DateTime` from the current date and given `NaiveTime`. 
+ /// The offset in the current date is preserved. + /// + /// Propagates any error. Ambiguous result would be discarded. + #[inline] + pub fn and_time(self, time: NaiveTime) -> LocalResult> { + match self { + LocalResult::Single(d) => d.and_time(time) + .map_or(LocalResult::None, LocalResult::Single), + _ => LocalResult::None, + } + } + + /// Makes a new `DateTime` from the current date, hour, minute and second. + /// The offset in the current date is preserved. + /// + /// Propagates any error. Ambiguous result would be discarded. + #[inline] + pub fn and_hms_opt(self, hour: u32, min: u32, sec: u32) -> LocalResult> { + match self { + LocalResult::Single(d) => d.and_hms_opt(hour, min, sec) + .map_or(LocalResult::None, LocalResult::Single), + _ => LocalResult::None, + } + } + + /// Makes a new `DateTime` from the current date, hour, minute, second and millisecond. + /// The millisecond part can exceed 1,000 in order to represent the leap second. + /// The offset in the current date is preserved. + /// + /// Propagates any error. Ambiguous result would be discarded. + #[inline] + pub fn and_hms_milli_opt(self, hour: u32, min: u32, sec: u32, + milli: u32) -> LocalResult> { + match self { + LocalResult::Single(d) => d.and_hms_milli_opt(hour, min, sec, milli) + .map_or(LocalResult::None, LocalResult::Single), + _ => LocalResult::None, + } + } + + /// Makes a new `DateTime` from the current date, hour, minute, second and microsecond. + /// The microsecond part can exceed 1,000,000 in order to represent the leap second. + /// The offset in the current date is preserved. + /// + /// Propagates any error. Ambiguous result would be discarded. 
+ #[inline] + pub fn and_hms_micro_opt(self, hour: u32, min: u32, sec: u32, + micro: u32) -> LocalResult> { + match self { + LocalResult::Single(d) => d.and_hms_micro_opt(hour, min, sec, micro) + .map_or(LocalResult::None, LocalResult::Single), + _ => LocalResult::None, + } + } + + /// Makes a new `DateTime` from the current date, hour, minute, second and nanosecond. + /// The nanosecond part can exceed 1,000,000,000 in order to represent the leap second. + /// The offset in the current date is preserved. + /// + /// Propagates any error. Ambiguous result would be discarded. + #[inline] + pub fn and_hms_nano_opt(self, hour: u32, min: u32, sec: u32, + nano: u32) -> LocalResult> { + match self { + LocalResult::Single(d) => d.and_hms_nano_opt(hour, min, sec, nano) + .map_or(LocalResult::None, LocalResult::Single), + _ => LocalResult::None, + } + } + +} + +impl LocalResult { + /// Returns the single unique conversion result, or panics accordingly. + pub fn unwrap(self) -> T { + match self { + LocalResult::None => panic!("No such local time"), + LocalResult::Single(t) => t, + LocalResult::Ambiguous(t1,t2) => { + panic!("Ambiguous local time, ranging from {:?} to {:?}", t1, t2) + } + } + } +} + +/// The offset from the local time to UTC. +pub trait Offset: Sized + Clone + fmt::Debug { + /// Returns the offset from UTC to the local time stored. + fn local_minus_utc(&self) -> Duration; +} + +/// The time zone. +pub trait TimeZone: Sized + Clone { + /// An associated offset type. + /// This type is used to store the actual offset in date and time types. + /// The original `TimeZone` value can be recovered via `TimeZone::from_offset`. + type Offset: Offset; + + /// Makes a new `Date` from year, month, day and the current time zone. + /// This assumes the proleptic Gregorian calendar, with the year 0 being 1 BCE. 
+ /// + /// The time zone normally does not affect the date (unless it is between UTC-24 and UTC+24), + /// but it will propagate to the `DateTime` values constructed via this date. + /// + /// Panics on the out-of-range date, invalid month and/or day. + fn ymd(&self, year: i32, month: u32, day: u32) -> Date { + self.ymd_opt(year, month, day).unwrap() + } + + /// Makes a new `Date` from year, month, day and the current time zone. + /// This assumes the proleptic Gregorian calendar, with the year 0 being 1 BCE. + /// + /// The time zone normally does not affect the date (unless it is between UTC-24 and UTC+24), + /// but it will propagate to the `DateTime` values constructed via this date. + /// + /// Returns `None` on the out-of-range date, invalid month and/or day. + fn ymd_opt(&self, year: i32, month: u32, day: u32) -> LocalResult> { + match NaiveDate::from_ymd_opt(year, month, day) { + Some(d) => self.from_local_date(&d), + None => LocalResult::None, + } + } + + /// Makes a new `Date` from year, day of year (DOY or "ordinal") and the current time zone. + /// This assumes the proleptic Gregorian calendar, with the year 0 being 1 BCE. + /// + /// The time zone normally does not affect the date (unless it is between UTC-24 and UTC+24), + /// but it will propagate to the `DateTime` values constructed via this date. + /// + /// Panics on the out-of-range date and/or invalid DOY. + fn yo(&self, year: i32, ordinal: u32) -> Date { + self.yo_opt(year, ordinal).unwrap() + } + + /// Makes a new `Date` from year, day of year (DOY or "ordinal") and the current time zone. + /// This assumes the proleptic Gregorian calendar, with the year 0 being 1 BCE. + /// + /// The time zone normally does not affect the date (unless it is between UTC-24 and UTC+24), + /// but it will propagate to the `DateTime` values constructed via this date. + /// + /// Returns `None` on the out-of-range date and/or invalid DOY. 
+ fn yo_opt(&self, year: i32, ordinal: u32) -> LocalResult> { + match NaiveDate::from_yo_opt(year, ordinal) { + Some(d) => self.from_local_date(&d), + None => LocalResult::None, + } + } + + /// Makes a new `Date` from ISO week date (year and week number), day of the week (DOW) and + /// the current time zone. + /// This assumes the proleptic Gregorian calendar, with the year 0 being 1 BCE. + /// The resulting `Date` may have a different year from the input year. + /// + /// The time zone normally does not affect the date (unless it is between UTC-24 and UTC+24), + /// but it will propagate to the `DateTime` values constructed via this date. + /// + /// Panics on the out-of-range date and/or invalid week number. + fn isoywd(&self, year: i32, week: u32, weekday: Weekday) -> Date { + self.isoywd_opt(year, week, weekday).unwrap() + } + + /// Makes a new `Date` from ISO week date (year and week number), day of the week (DOW) and + /// the current time zone. + /// This assumes the proleptic Gregorian calendar, with the year 0 being 1 BCE. + /// The resulting `Date` may have a different year from the input year. + /// + /// The time zone normally does not affect the date (unless it is between UTC-24 and UTC+24), + /// but it will propagate to the `DateTime` values constructed via this date. + /// + /// Returns `None` on the out-of-range date and/or invalid week number. + fn isoywd_opt(&self, year: i32, week: u32, weekday: Weekday) -> LocalResult> { + match NaiveDate::from_isoywd_opt(year, week, weekday) { + Some(d) => self.from_local_date(&d), + None => LocalResult::None, + } + } + + /// Makes a new `DateTime` from the number of non-leap seconds + /// since January 1, 1970 0:00:00 UTC (aka "UNIX timestamp") + /// and the number of nanoseconds since the last whole non-leap second. + /// + /// Panics on the out-of-range number of seconds and/or invalid nanosecond. 
+ fn timestamp(&self, secs: i64, nsecs: u32) -> DateTime { + self.timestamp_opt(secs, nsecs).unwrap() + } + + /// Makes a new `DateTime` from the number of non-leap seconds + /// since January 1, 1970 0:00:00 UTC (aka "UNIX timestamp") + /// and the number of nanoseconds since the last whole non-leap second. + /// + /// Returns `None` on the out-of-range number of seconds and/or invalid nanosecond. + fn timestamp_opt(&self, secs: i64, nsecs: u32) -> LocalResult> { + match NaiveDateTime::from_timestamp_opt(secs, nsecs) { + Some(dt) => LocalResult::Single(self.from_utc_datetime(&dt)), + None => LocalResult::None, + } + } + + /// Parses a string with the specified format string and + /// returns a `DateTime` with the current offset. + /// See the [`format::strftime` module](../../format/strftime/index.html) + /// on the supported escape sequences. + /// + /// If the format does not include offsets, the current offset is assumed; + /// otherwise the input should have a matching UTC offset. + /// + /// See also `DateTime::parse_from_str` which gives a local `DateTime` + /// with parsed `FixedOffset`. + fn datetime_from_str(&self, s: &str, fmt: &str) -> ParseResult> { + let mut parsed = Parsed::new(); + try!(parse(&mut parsed, s, StrftimeItems::new(fmt))); + parsed.to_datetime_with_timezone(self) + } + + /// Reconstructs the time zone from the offset. + fn from_offset(offset: &Self::Offset) -> Self; + + /// Creates the offset(s) for given local `NaiveDate` if possible. + fn offset_from_local_date(&self, local: &NaiveDate) -> LocalResult; + + /// Creates the offset(s) for given local `NaiveDateTime` if possible. + fn offset_from_local_datetime(&self, local: &NaiveDateTime) -> LocalResult; + + /// Converts the local `NaiveDate` to the timezone-aware `Date` if possible. 
+ fn from_local_date(&self, local: &NaiveDate) -> LocalResult> { + self.offset_from_local_date(local).map(|offset| { + Date::from_utc(*local - offset.local_minus_utc(), offset) + }) + } + + /// Converts the local `NaiveDateTime` to the timezone-aware `DateTime` if possible. + fn from_local_datetime(&self, local: &NaiveDateTime) -> LocalResult> { + self.offset_from_local_datetime(local).map(|offset| { + DateTime::from_utc(*local - offset.local_minus_utc(), offset) + }) + } + + /// Creates the offset for given UTC `NaiveDate`. This cannot fail. + fn offset_from_utc_date(&self, utc: &NaiveDate) -> Self::Offset; + + /// Creates the offset for given UTC `NaiveDateTime`. This cannot fail. + fn offset_from_utc_datetime(&self, utc: &NaiveDateTime) -> Self::Offset; + + /// Converts the UTC `NaiveDate` to the local time. + /// The UTC is continuous and thus this cannot fail (but can give the duplicate local time). + fn from_utc_date(&self, utc: &NaiveDate) -> Date { + Date::from_utc(utc.clone(), self.offset_from_utc_date(utc)) + } + + /// Converts the UTC `NaiveDateTime` to the local time. + /// The UTC is continuous and thus this cannot fail (but can give the duplicate local time). + fn from_utc_datetime(&self, utc: &NaiveDateTime) -> DateTime { + DateTime::from_utc(utc.clone(), self.offset_from_utc_datetime(utc)) + } +} + +pub mod utc; +pub mod fixed; +pub mod local; + diff -Nru cargo-0.17.0/vendor/chrono-0.2.25/src/offset/utc.rs cargo-0.19.0/vendor/chrono-0.2.25/src/offset/utc.rs --- cargo-0.17.0/vendor/chrono-0.2.25/src/offset/utc.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/chrono-0.2.25/src/offset/utc.rs 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,64 @@ +// This is a part of rust-chrono. +// Copyright (c) 2015, Kang Seonghoon. +// See README.md and LICENSE.txt for details. + +/*! + * The UTC (Coordinated Universal Time) time zone. 
+ */ + +use std::fmt; +use stdtime; + +use duration::Duration; +use naive::date::NaiveDate; +use naive::datetime::NaiveDateTime; +use date::Date; +use datetime::DateTime; +use super::{TimeZone, Offset, LocalResult}; + +/// The UTC time zone. This is the most efficient time zone when you don't need the local time. +/// It is also used as an offset (which is also a dummy type). +#[derive(Copy, Clone, PartialEq, Eq)] +#[cfg_attr(feature = "rustc-serialize", derive(RustcEncodable, RustcDecodable))] +pub struct UTC; + +impl UTC { + /// Returns a `Date` which corresponds to the current date. + pub fn today() -> Date { UTC::now().date() } + + /// Returns a `DateTime` which corresponds to the current date. + pub fn now() -> DateTime { + let spec = stdtime::get_time(); + let naive = NaiveDateTime::from_timestamp(spec.sec, spec.nsec as u32); + DateTime::from_utc(naive, UTC) + } +} + +impl TimeZone for UTC { + type Offset = UTC; + + fn from_offset(_state: &UTC) -> UTC { UTC } + + fn offset_from_local_date(&self, _local: &NaiveDate) -> LocalResult { + LocalResult::Single(UTC) + } + fn offset_from_local_datetime(&self, _local: &NaiveDateTime) -> LocalResult { + LocalResult::Single(UTC) + } + + fn offset_from_utc_date(&self, _utc: &NaiveDate) -> UTC { UTC } + fn offset_from_utc_datetime(&self, _utc: &NaiveDateTime) -> UTC { UTC} +} + +impl Offset for UTC { + fn local_minus_utc(&self) -> Duration { Duration::zero() } +} + +impl fmt::Debug for UTC { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "Z") } +} + +impl fmt::Display for UTC { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "UTC") } +} + diff -Nru cargo-0.17.0/vendor/chrono-0.2.25/.travis.yml cargo-0.19.0/vendor/chrono-0.2.25/.travis.yml --- cargo-0.17.0/vendor/chrono-0.2.25/.travis.yml 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/chrono-0.2.25/.travis.yml 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,18 @@ +language: rust +os: + - linux + - osx +env: + global: + - 
LD_LIBRARY_PATH: /usr/local/lib + - secure: i8Ijk6g4/26e3e7+r2OeGAPSP8G8O9P50JibW1omJ0j0ixXhyhPoY2bch3CGhnOu44dI5O31IIbjJJ+iEMp29xQBvkv9YpxAI+hIzOP+XAH6GCYxUDiBVcDoWrXTj+wU6/veuvjLCunu4eRHlskrgJbZXhUVODYzJuLgsN8Ou0w= +script: + - cargo build -v + - cargo build -v --features rustc-serialize + - cargo build -v --features serde + - cargo test -v + - cargo test -v --features rustc-serialize + - cargo test -v --features serde + - cargo doc +after_script: + - cd target && curl http://www.rust-ci.org/artifacts/put?t=$RUSTCI_TOKEN | sh diff -Nru cargo-0.17.0/vendor/cmake-0.1.19/.cargo-checksum.json cargo-0.19.0/vendor/cmake-0.1.19/.cargo-checksum.json --- cargo-0.17.0/vendor/cmake-0.1.19/.cargo-checksum.json 2017-03-24 16:59:54.000000000 +0000 +++ cargo-0.19.0/vendor/cmake-0.1.19/.cargo-checksum.json 1970-01-01 00:00:00.000000000 +0000 @@ -1 +0,0 @@ -{"files":{".cargo-ok":"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",".gitignore":"c1e953ee360e77de57f7b02f1b7880bd6a3dc22d1a69e953c2ac2c52cc52d247",".travis.yml":"c5565ac6e1981bf3a88d132c16e381411a239a1c25ec140ee13cf2d50f1f97d0","Cargo.toml":"4b25859c27c389eeeadc5daa2cc6604a2b8558ece8bbe6abcbe779b30562fb2e","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"378f5840b258e2779c39418f3f2d7b2ba96f1c7917dd6be0713f88305dbda397","README.md":"8ca528d20639506546044c676ff9069e3e850937b02bff4194dcf9e5c3c50d64","src/lib.rs":"95fa469fa525fd1e3f28cb51b839b88af9c13a3eb1c92e8fb8df27dba1fbb1f3","src/registry.rs":"ca16433f51b5e3aedb0560bba41370b0c42de9238926a5118d1c0a3a072b64b2"},"package":"8ebde6558caa6cf9bffe5750c66c517e7f9d470d59fcd48b0acbc0a02d62a82a"} \ No newline at end of file diff -Nru cargo-0.17.0/vendor/cmake-0.1.19/Cargo.toml cargo-0.19.0/vendor/cmake-0.1.19/Cargo.toml --- cargo-0.17.0/vendor/cmake-0.1.19/Cargo.toml 2017-03-24 16:59:54.000000000 +0000 +++ cargo-0.19.0/vendor/cmake-0.1.19/Cargo.toml 1970-01-01 00:00:00.000000000 +0000 @@ -1,17 +0,0 @@ -[package] - 
-name = "cmake" -version = "0.1.19" -authors = ["Alex Crichton "] -license = "MIT/Apache-2.0" -readme = "README.md" -keywords = ["build-dependencies"] -repository = "https://github.com/alexcrichton/cmake-rs" -homepage = "https://github.com/alexcrichton/cmake-rs" -documentation = "http://alexcrichton.com/cmake-rs" -description = """ -A build dependency for running `cmake` to build a native library -""" - -[dependencies] -gcc = "0.3.17" diff -Nru cargo-0.17.0/vendor/cmake-0.1.19/.gitignore cargo-0.19.0/vendor/cmake-0.1.19/.gitignore --- cargo-0.17.0/vendor/cmake-0.1.19/.gitignore 2017-03-24 16:59:54.000000000 +0000 +++ cargo-0.19.0/vendor/cmake-0.1.19/.gitignore 1970-01-01 00:00:00.000000000 +0000 @@ -1,2 +0,0 @@ -/target -/Cargo.lock diff -Nru cargo-0.17.0/vendor/cmake-0.1.19/LICENSE-APACHE cargo-0.19.0/vendor/cmake-0.1.19/LICENSE-APACHE --- cargo-0.17.0/vendor/cmake-0.1.19/LICENSE-APACHE 2017-03-24 16:59:54.000000000 +0000 +++ cargo-0.19.0/vendor/cmake-0.1.19/LICENSE-APACHE 1970-01-01 00:00:00.000000000 +0000 @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - -1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. 
- - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - -2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - -3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - -4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. 
You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - -5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - -6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - -7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - -8. 
Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - -9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - -END OF TERMS AND CONDITIONS - -APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- -Copyright [yyyy] [name of copyright owner] - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. diff -Nru cargo-0.17.0/vendor/cmake-0.1.19/LICENSE-MIT cargo-0.19.0/vendor/cmake-0.1.19/LICENSE-MIT --- cargo-0.17.0/vendor/cmake-0.1.19/LICENSE-MIT 2017-03-24 16:59:54.000000000 +0000 +++ cargo-0.19.0/vendor/cmake-0.1.19/LICENSE-MIT 1970-01-01 00:00:00.000000000 +0000 @@ -1,25 +0,0 @@ -Copyright (c) 2014 Alex Crichton - -Permission is hereby granted, free of charge, to any -person obtaining a copy of this software and associated -documentation files (the "Software"), to deal in the -Software without restriction, including without -limitation the rights to use, copy, modify, merge, -publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software -is furnished to do so, subject to the following -conditions: - -The above copyright notice and this permission notice -shall be included in all copies or substantial portions -of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -DEALINGS IN THE SOFTWARE. 
diff -Nru cargo-0.17.0/vendor/cmake-0.1.19/README.md cargo-0.19.0/vendor/cmake-0.1.19/README.md --- cargo-0.17.0/vendor/cmake-0.1.19/README.md 2017-03-24 16:59:54.000000000 +0000 +++ cargo-0.19.0/vendor/cmake-0.1.19/README.md 1970-01-01 00:00:00.000000000 +0000 @@ -1,22 +0,0 @@ -# cmake - -[![Build Status](https://travis-ci.org/alexcrichton/cmake-rs.svg?branch=master)](https://travis-ci.org/alexcrichton/cmake-rs) - -[Documentation](http://alexcrichton.com/cmake-rs) - -A build dependency for running the `cmake` build tool to compile a native -library. - -```toml -# Cargo.toml -[build-dependencies] -cmake = "0.2" -``` - -# License - -`cmake-rs` is primarily distributed under the terms of both the MIT license and -the Apache License (Version 2.0), with portions covered by various BSD-like -licenses. - -See LICENSE-APACHE, and LICENSE-MIT for details. diff -Nru cargo-0.17.0/vendor/cmake-0.1.19/src/lib.rs cargo-0.19.0/vendor/cmake-0.1.19/src/lib.rs --- cargo-0.17.0/vendor/cmake-0.1.19/src/lib.rs 2017-03-24 16:59:54.000000000 +0000 +++ cargo-0.19.0/vendor/cmake-0.1.19/src/lib.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,543 +0,0 @@ -//! A build dependency for running `cmake` to build a native library -//! -//! This crate provides some necessary boilerplate and shim support for running -//! the system `cmake` command to build a native library. It will add -//! appropriate cflags for building code to link into Rust, handle cross -//! compilation, and use the necessary generator for the platform being -//! targeted. -//! -//! The builder-style configuration allows for various variables and such to be -//! passed down into the build as well. -//! -//! ## Installation -//! -//! Add this to your `Cargo.toml`: -//! -//! ```toml -//! [build-dependencies] -//! cmake = "0.1" -//! ``` -//! -//! ## Examples -//! -//! ```no_run -//! use cmake; -//! -//! // Builds the project in the directory located in `libfoo`, installing it -//! // into $OUT_DIR -//! 
let dst = cmake::build("libfoo"); -//! -//! println!("cargo:rustc-link-search=native={}", dst.display()); -//! println!("cargo:rustc-link-lib=static=foo"); -//! ``` -//! -//! ```no_run -//! use cmake::Config; -//! -//! let dst = Config::new("libfoo") -//! .define("FOO", "BAR") -//! .cflag("-foo") -//! .build(); -//! println!("cargo:rustc-link-search=native={}", dst.display()); -//! println!("cargo:rustc-link-lib=static=foo"); -//! ``` - -#![deny(missing_docs)] - -extern crate gcc; - -use std::env; -use std::ffi::{OsString, OsStr}; -use std::fs::{self, File}; -use std::io::ErrorKind; -use std::io::prelude::*; -use std::path::{Path, PathBuf}; -use std::process::Command; - -#[cfg(windows)] -mod registry; - -/// Builder style configuration for a pending CMake build. -pub struct Config { - path: PathBuf, - generator: Option, - cflags: OsString, - cxxflags: OsString, - defines: Vec<(OsString, OsString)>, - deps: Vec, - target: Option, - host: Option, - out_dir: Option, - profile: Option, - build_args: Vec, - cmake_target: Option, -} - -/// Builds the native library rooted at `path` with the default cmake options. -/// This will return the directory in which the library was installed. -/// -/// # Examples -/// -/// ```no_run -/// use cmake; -/// -/// // Builds the project in the directory located in `libfoo`, installing it -/// // into $OUT_DIR -/// let dst = cmake::build("libfoo"); -/// -/// println!("cargo:rustc-link-search=native={}", dst.display()); -/// println!("cargo:rustc-link-lib=static=foo"); -/// ``` -/// -pub fn build>(path: P) -> PathBuf { - Config::new(path.as_ref()).build() -} - -impl Config { - /// Creates a new blank set of configuration to build the project specified - /// at the path `path`. 
- pub fn new>(path: P) -> Config { - Config { - path: env::current_dir().unwrap().join(path), - generator: None, - cflags: OsString::new(), - cxxflags: OsString::new(), - defines: Vec::new(), - deps: Vec::new(), - profile: None, - out_dir: None, - target: None, - host: None, - build_args: Vec::new(), - cmake_target: None, - } - } - - /// Sets the build-tool generator (`-G`) for this compilation. - pub fn generator>(&mut self, generator: T) -> &mut Config { - self.generator = Some(generator.as_ref().to_owned()); - self - } - - /// Adds a custom flag to pass down to the C compiler, supplementing those - /// that this library already passes. - pub fn cflag>(&mut self, flag: P) -> &mut Config { - self.cflags.push(" "); - self.cflags.push(flag.as_ref()); - self - } - - /// Adds a custom flag to pass down to the C++ compiler, supplementing those - /// that this library already passes. - pub fn cxxflag>(&mut self, flag: P) -> &mut Config { - self.cxxflags.push(" "); - self.cxxflags.push(flag.as_ref()); - self - } - - /// Adds a new `-D` flag to pass to cmake during the generation step. - pub fn define(&mut self, k: K, v: V) -> &mut Config - where K: AsRef, V: AsRef - { - self.defines.push((k.as_ref().to_owned(), v.as_ref().to_owned())); - self - } - - /// Registers a dependency for this compilation on the native library built - /// by Cargo previously. - /// - /// This registration will modify the `CMAKE_PREFIX_PATH` environment - /// variable for the build system generation step. - pub fn register_dep(&mut self, dep: &str) -> &mut Config { - self.deps.push(dep.to_string()); - self - } - - /// Sets the target triple for this compilation. - /// - /// This is automatically scraped from `$TARGET` which is set for Cargo - /// build scripts so it's not necessary to call this from a build script. - pub fn target(&mut self, target: &str) -> &mut Config { - self.target = Some(target.to_string()); - self - } - - /// Sets the host triple for this compilation. 
- /// - /// This is automatically scraped from `$HOST` which is set for Cargo - /// build scripts so it's not necessary to call this from a build script. - pub fn host(&mut self, host: &str) -> &mut Config { - self.host = Some(host.to_string()); - self - } - - /// Sets the output directory for this compilation. - /// - /// This is automatically scraped from `$OUT_DIR` which is set for Cargo - /// build scripts so it's not necessary to call this from a build script. - pub fn out_dir>(&mut self, out: P) -> &mut Config { - self.out_dir = Some(out.as_ref().to_path_buf()); - self - } - - /// Sets the profile for this compilation. - /// - /// This is automatically scraped from `$PROFILE` which is set for Cargo - /// build scripts so it's not necessary to call this from a build script. - pub fn profile(&mut self, profile: &str) -> &mut Config { - self.profile = Some(profile.to_string()); - self - } - - /// Add an argument to the final `cmake` build step - pub fn build_arg>(&mut self, arg: A) -> &mut Config { - self.build_args.push(arg.as_ref().to_owned()); - self - } - - /// Sets the build target for the final `cmake` build step, this will - /// default to "install" if not specified. - pub fn build_target(&mut self, target: &str) -> &mut Config { - self.cmake_target = Some(target.to_string()); - self - } - - /// Run this configuration, compiling the library with all the configured - /// options. - /// - /// This will run both the build system generator command as well as the - /// command to build the library. 
- pub fn build(&mut self) -> PathBuf { - let target = self.target.clone().unwrap_or_else(|| { - getenv_unwrap("TARGET") - }); - let host = self.host.clone().unwrap_or_else(|| { - getenv_unwrap("HOST") - }); - let msvc = target.contains("msvc"); - let c_compiler = gcc::Config::new().cargo_metadata(false) - .opt_level(0) - .debug(false) - .target(&target) - .host(&host) - .get_compiler(); - let cxx_compiler = gcc::Config::new().cargo_metadata(false) - .cpp(true) - .opt_level(0) - .debug(false) - .target(&target) - .host(&host) - .get_compiler(); - - let dst = self.out_dir.clone().unwrap_or_else(|| { - PathBuf::from(getenv_unwrap("OUT_DIR")) - }); - let build = dst.join("build"); - self.maybe_clear(&build); - let _ = fs::create_dir(&build); - - // Add all our dependencies to our cmake paths - let mut cmake_prefix_path = Vec::new(); - for dep in &self.deps { - if let Some(root) = env::var_os(&format!("DEP_{}_ROOT", dep)) { - cmake_prefix_path.push(PathBuf::from(root)); - } - } - let system_prefix = env::var_os("CMAKE_PREFIX_PATH") - .unwrap_or(OsString::new()); - cmake_prefix_path.extend(env::split_paths(&system_prefix) - .map(|s| s.to_owned())); - let cmake_prefix_path = env::join_paths(&cmake_prefix_path).unwrap(); - - // Build up the first cmake command to build the build system. - let mut cmd = Command::new("cmake"); - cmd.arg(&self.path) - .current_dir(&build); - if target.contains("windows-gnu") { - if host.contains("windows") { - // On MinGW we need to coerce cmake to not generate a visual - // studio build system but instead use makefiles that MinGW can - // use to build. - if self.generator.is_none() { - cmd.arg("-G").arg("MSYS Makefiles"); - } - } else { - // If we're cross compiling onto windows, then set some - // variables which will hopefully get things to succeed. Some - // systems may need the `windres` or `dlltool` variables set, so - // set them if possible. 
- if !self.defined("CMAKE_SYSTEM_NAME") { - cmd.arg("-DCMAKE_SYSTEM_NAME=Windows"); - } - if !self.defined("CMAKE_RC_COMPILER") { - let exe = find_exe(c_compiler.path()); - if let Some(name) = exe.file_name().unwrap().to_str() { - let name = name.replace("gcc", "windres"); - let windres = exe.with_file_name(name); - if windres.is_file() { - let mut arg = OsString::from("-DCMAKE_RC_COMPILER="); - arg.push(&windres); - cmd.arg(arg); - } - } - } - } - } else if msvc { - // If we're on MSVC we need to be sure to use the right generator or - // otherwise we won't get 32/64 bit correct automatically. - if self.generator.is_none() { - cmd.arg("-G").arg(self.visual_studio_generator(&target)); - } - } - if let Some(ref generator) = self.generator { - cmd.arg("-G").arg(generator); - } - let profile = self.profile.clone().unwrap_or_else(|| { - match &getenv_unwrap("PROFILE")[..] { - "bench" | "release" => "Release", - // currently we need to always use the same CRT for MSVC - _ if msvc => "Release", - _ => "Debug", - }.to_string() - }); - for &(ref k, ref v) in &self.defines { - let mut os = OsString::from("-D"); - os.push(k); - os.push("="); - os.push(v); - cmd.arg(os); - } - - if !self.defined("CMAKE_INSTALL_PREFIX") { - let mut dstflag = OsString::from("-DCMAKE_INSTALL_PREFIX="); - dstflag.push(&dst); - cmd.arg(dstflag); - } - - let build_type = self.defines.iter().find(|&&(ref a, _)| { - a == "CMAKE_BUILD_TYPE" - }).map(|x| x.1.to_str().unwrap()).unwrap_or(&profile); - let build_type_upcase = build_type.chars() - .flat_map(|c| c.to_uppercase()) - .collect::(); - - { - let mut set_compiler = |kind: &str, - compiler: &gcc::Tool, - extra: &OsString| { - let flag_var = format!("CMAKE_{}_FLAGS", kind); - let tool_var = format!("CMAKE_{}_COMPILER", kind); - if !self.defined(&flag_var) { - let mut flagsflag = OsString::from("-D"); - flagsflag.push(&flag_var); - flagsflag.push("="); - flagsflag.push(extra); - for arg in compiler.args() { - flagsflag.push(" "); - 
flagsflag.push(arg); - } - cmd.arg(flagsflag); - } - - let flag_var_alt = format!("CMAKE_{}_FLAGS_{}", kind, - build_type_upcase); - if !self.defined(&flag_var_alt) { - let mut flagsflag = OsString::from("-D"); - flagsflag.push(&flag_var_alt); - flagsflag.push("="); - flagsflag.push(extra); - for arg in compiler.args() { - flagsflag.push(" "); - flagsflag.push(arg); - } - cmd.arg(flagsflag); - } - - // Apparently cmake likes to have an absolute path to the - // compiler as otherwise it sometimes thinks that this variable - // changed as it thinks the found compiler, /usr/bin/cc, - // differs from the specified compiler, cc. Not entirely sure - // what's up, but at least this means cmake doesn't get - // confused? - // - // Also don't specify this on Windows as it's not needed for - // MSVC and for MinGW it doesn't really vary. - if !self.defined("CMAKE_TOOLCHAIN_FILE") - && !self.defined(&tool_var) - && env::consts::FAMILY != "windows" { - let mut ccompiler = OsString::from("-D"); - ccompiler.push(&tool_var); - ccompiler.push("="); - ccompiler.push(find_exe(compiler.path())); - cmd.arg(ccompiler); - } - }; - - set_compiler("C", &c_compiler, &self.cflags); - set_compiler("CXX", &cxx_compiler, &self.cxxflags); - } - - if !self.defined("CMAKE_BUILD_TYPE") { - cmd.arg(&format!("-DCMAKE_BUILD_TYPE={}", profile)); - } - - if !self.defined("CMAKE_TOOLCHAIN_FILE") { - if let Ok(s) = env::var("CMAKE_TOOLCHAIN_FILE") { - cmd.arg(&format!("-DCMAKE_TOOLCHAIN_FILE={}", s)); - } - } - - run(cmd.env("CMAKE_PREFIX_PATH", cmake_prefix_path), "cmake"); - - let mut parallel_args = Vec::new(); - if fs::metadata(&dst.join("build/Makefile")).is_ok() { - if let Ok(s) = env::var("NUM_JOBS") { - parallel_args.push(format!("-j{}", s)); - } - } - - // And build! 
- let target = self.cmake_target.clone().unwrap_or("install".to_string()); - run(Command::new("cmake") - .arg("--build").arg(".") - .arg("--target").arg(target) - .arg("--config").arg(&profile) - .arg("--").args(&self.build_args) - .args(¶llel_args) - .current_dir(&build), "cmake"); - - println!("cargo:root={}", dst.display()); - return dst - } - - fn visual_studio_generator(&self, target: &str) -> String { - let base = match std::env::var("VisualStudioVersion") { - Ok(version) => { - match &version[..] { - "15.0" => "Visual Studio 15", - "14.0" => "Visual Studio 14 2015", - "12.0" => "Visual Studio 12 2013", - vers => panic!("\n\n\ - unsupported or unknown VisualStudio version: {}\n\ - if another version is installed consider running \ - the appropriate vcvars script before building this \ - crate\n\ - ", vers), - } - } - _ => { - // Check for the presense of a specific registry key - // that indicates visual studio is installed. - if self.has_msbuild_version("15.0") { - "Visual Studio 15" - } else if self.has_msbuild_version("14.0") { - "Visual Studio 14 2015" - } else if self.has_msbuild_version("12.0") { - "Visual Studio 12 2013" - } else { - panic!("\n\n\ - couldn't determine visual studio generator\n\ - if VisualStudio is installed, however, consider \ - running the appropriate vcvars script before building \ - this crate\n\ - "); - } - } - }; - - if target.contains("i686") { - base.to_string() - } else if target.contains("x86_64") { - format!("{} Win64", base) - } else { - panic!("unsupported msvc target: {}", target); - } - } - - #[cfg(not(windows))] - fn has_msbuild_version(&self, _version: &str) -> bool { - false - } - - #[cfg(windows)] - fn has_msbuild_version(&self, version: &str) -> bool { - let key = format!("SOFTWARE\\Microsoft\\MSBuild\\ToolsVersions\\{}", - version); - registry::LOCAL_MACHINE.open(key.as_ref()).is_ok() - } - - fn defined(&self, var: &str) -> bool { - self.defines.iter().any(|&(ref a, _)| a == var) - } - - // If a cmake project has 
previously been built (e.g. CMakeCache.txt already - // exists), then cmake will choke if the source directory for the original - // project being built has changed. Detect this situation through the - // `CMAKE_HOME_DIRECTORY` variable that cmake emits and if it doesn't match - // we blow away the build directory and start from scratch (the recommended - // solution apparently [1]). - // - // [1]: https://cmake.org/pipermail/cmake/2012-August/051545.html - fn maybe_clear(&self, dir: &Path) { - // CMake will apparently store canonicalized paths which normally - // isn't relevant to us but we canonicalize it here to ensure - // we're both checking the same thing. - let path = fs::canonicalize(&self.path).unwrap_or(self.path.clone()); - let src = match path.to_str() { - Some(src) => src, - None => return, - }; - let mut f = match File::open(dir.join("CMakeCache.txt")) { - Ok(f) => f, - Err(..) => return, - }; - let mut u8contents = Vec::new(); - match f.read_to_end(&mut u8contents) { - Ok(f) => f, - Err(..) 
=> return, - }; - let contents = String::from_utf8_lossy(&u8contents); - drop(f); - for line in contents.lines() { - if line.contains("CMAKE_HOME_DIRECTORY") && !line.contains(src) { - println!("detected home dir change, cleaning out entire build \ - directory"); - fs::remove_dir_all(dir).unwrap(); - break - } - } - } -} - -fn run(cmd: &mut Command, program: &str) { - println!("running: {:?}", cmd); - let status = match cmd.status() { - Ok(status) => status, - Err(ref e) if e.kind() == ErrorKind::NotFound => { - fail(&format!("failed to execute command: {}\nis `{}` not installed?", - e, program)); - } - Err(e) => fail(&format!("failed to execute command: {}", e)), - }; - if !status.success() { - fail(&format!("command did not execute successfully, got: {}", status)); - } -} - -fn find_exe(path: &Path) -> PathBuf { - env::split_paths(&env::var_os("PATH").unwrap_or(OsString::new())) - .map(|p| p.join(path)) - .find(|p| fs::metadata(p).is_ok()) - .unwrap_or(path.to_owned()) -} - -fn getenv_unwrap(v: &str) -> String { - match env::var(v) { - Ok(s) => s, - Err(..) => fail(&format!("environment variable `{}` not defined", v)), - } -} - -fn fail(s: &str) -> ! { - panic!("\n{}\n\nbuild script failed, must exit now", s) -} diff -Nru cargo-0.17.0/vendor/cmake-0.1.19/src/registry.rs cargo-0.19.0/vendor/cmake-0.1.19/src/registry.rs --- cargo-0.17.0/vendor/cmake-0.1.19/src/registry.rs 2017-03-24 16:59:54.000000000 +0000 +++ cargo-0.19.0/vendor/cmake-0.1.19/src/registry.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,84 +0,0 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
- -use std::ffi::OsStr; -use std::io; -use std::os::raw; -use std::os::windows::prelude::*; - -pub struct RegistryKey(Repr); - -type HKEY = *mut u8; -type DWORD = u32; -type LPDWORD = *mut DWORD; -type LPCWSTR = *const u16; -type LPWSTR = *mut u16; -type LONG = raw::c_long; -type PHKEY = *mut HKEY; -type PFILETIME = *mut u8; -type LPBYTE = *mut u8; -type REGSAM = u32; - -const ERROR_SUCCESS: DWORD = 0; -const HKEY_LOCAL_MACHINE: HKEY = 0x80000002 as HKEY; -const KEY_READ: DWORD = 0x20019; -const KEY_WOW64_32KEY: DWORD = 0x200; - -#[link(name = "advapi32")] -extern "system" { - fn RegOpenKeyExW(key: HKEY, - lpSubKey: LPCWSTR, - ulOptions: DWORD, - samDesired: REGSAM, - phkResult: PHKEY) -> LONG; - fn RegCloseKey(hKey: HKEY) -> LONG; -} - -struct OwnedKey(HKEY); - -enum Repr { - Const(HKEY), - Owned(OwnedKey), -} - -unsafe impl Sync for Repr {} -unsafe impl Send for Repr {} - -pub static LOCAL_MACHINE: RegistryKey = - RegistryKey(Repr::Const(HKEY_LOCAL_MACHINE)); - -impl RegistryKey { - fn raw(&self) -> HKEY { - match self.0 { - Repr::Const(val) => val, - Repr::Owned(ref val) => val.0, - } - } - - pub fn open(&self, key: &OsStr) -> io::Result { - let key = key.encode_wide().chain(Some(0)).collect::>(); - let mut ret = 0 as *mut _; - let err = unsafe { - RegOpenKeyExW(self.raw(), key.as_ptr(), 0, - KEY_READ | KEY_WOW64_32KEY, &mut ret) - }; - if err == ERROR_SUCCESS as LONG { - Ok(RegistryKey(Repr::Owned(OwnedKey(ret)))) - } else { - Err(io::Error::from_raw_os_error(err as i32)) - } - } -} - -impl Drop for OwnedKey { - fn drop(&mut self) { - unsafe { RegCloseKey(self.0); } - } -} diff -Nru cargo-0.17.0/vendor/cmake-0.1.19/.travis.yml cargo-0.19.0/vendor/cmake-0.1.19/.travis.yml --- cargo-0.17.0/vendor/cmake-0.1.19/.travis.yml 2017-03-24 16:59:54.000000000 +0000 +++ cargo-0.19.0/vendor/cmake-0.1.19/.travis.yml 1970-01-01 00:00:00.000000000 +0000 @@ -1,20 +0,0 @@ -language: rust -rust: - - stable - - beta - - nightly -sudo: false -before_script: - - pip install 
'travis-cargo<0.2' --user && export PATH=$HOME/.local/bin:$PATH -script: - - cargo test --verbose - - cargo doc --no-deps -after_success: - - travis-cargo --only nightly doc-upload -env: - global: - secure: "IA467qqr1j0BpyTqG6hO8Kpt+EUDEjO1pBVhu4+L76/dygkQIwROgqdT7uXZqBPMjU6Rbi0wzGXXHJjbCWVTCjh7U/Q0bK2svtR8DKtM0o1Un/YftSUFt2p/WoiJ9PrkUjKh1rHuoyijpUqAls0JfIz8OdC45egT2SWDufljo+s=" - -notifications: - email: - on_success: never diff -Nru cargo-0.17.0/vendor/cmake-0.1.22/.cargo-checksum.json cargo-0.19.0/vendor/cmake-0.1.22/.cargo-checksum.json --- cargo-0.17.0/vendor/cmake-0.1.22/.cargo-checksum.json 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/cmake-0.1.22/.cargo-checksum.json 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1 @@ +{"files":{".cargo-ok":"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",".gitignore":"c1e953ee360e77de57f7b02f1b7880bd6a3dc22d1a69e953c2ac2c52cc52d247",".travis.yml":"c5565ac6e1981bf3a88d132c16e381411a239a1c25ec140ee13cf2d50f1f97d0","Cargo.toml":"5a9631435995d353894137e862a804ac06c6d7f3aaecfe5f44b87f3da4b523a4","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"378f5840b258e2779c39418f3f2d7b2ba96f1c7917dd6be0713f88305dbda397","README.md":"8ca528d20639506546044c676ff9069e3e850937b02bff4194dcf9e5c3c50d64","src/lib.rs":"638ad8d84f32b098fd46e2f7d2c053a8dbdb3b60616e48188d1cc85dda9aed96","src/registry.rs":"142a15192f3641e7630419a5cf951c0a1cf6b3605410b6ad6f7335c4fe47e2c1"},"package":"d18d68987ed4c516dcc3e7913659bfa4076f5182eea4a7e0038bb060953e76ac"} \ No newline at end of file diff -Nru cargo-0.17.0/vendor/cmake-0.1.22/Cargo.toml cargo-0.19.0/vendor/cmake-0.1.22/Cargo.toml --- cargo-0.17.0/vendor/cmake-0.1.22/Cargo.toml 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/cmake-0.1.22/Cargo.toml 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,17 @@ +[package] + +name = "cmake" +version = "0.1.22" +authors = ["Alex Crichton "] +license = "MIT/Apache-2.0" +readme = 
"README.md" +keywords = ["build-dependencies"] +repository = "https://github.com/alexcrichton/cmake-rs" +homepage = "https://github.com/alexcrichton/cmake-rs" +documentation = "http://alexcrichton.com/cmake-rs" +description = """ +A build dependency for running `cmake` to build a native library +""" + +[dependencies] +gcc = "0.3.17" diff -Nru cargo-0.17.0/vendor/cmake-0.1.22/.gitignore cargo-0.19.0/vendor/cmake-0.1.22/.gitignore --- cargo-0.17.0/vendor/cmake-0.1.22/.gitignore 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/cmake-0.1.22/.gitignore 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,2 @@ +/target +/Cargo.lock diff -Nru cargo-0.17.0/vendor/cmake-0.1.22/LICENSE-APACHE cargo-0.19.0/vendor/cmake-0.1.22/LICENSE-APACHE --- cargo-0.17.0/vendor/cmake-0.1.22/LICENSE-APACHE 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/cmake-0.1.22/LICENSE-APACHE 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + +4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + +8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ +Copyright [yyyy] [name of copyright owner] + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff -Nru cargo-0.17.0/vendor/cmake-0.1.22/LICENSE-MIT cargo-0.19.0/vendor/cmake-0.1.22/LICENSE-MIT --- cargo-0.17.0/vendor/cmake-0.1.22/LICENSE-MIT 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/cmake-0.1.22/LICENSE-MIT 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,25 @@ +Copyright (c) 2014 Alex Crichton + +Permission is hereby granted, free of charge, to any +person obtaining a copy of this software and associated +documentation files (the "Software"), to deal in the +Software without restriction, including without +limitation the rights to use, copy, modify, merge, +publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software +is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice +shall be included in all copies or substantial portions +of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF +ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED +TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A +PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR +IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. 
diff -Nru cargo-0.17.0/vendor/cmake-0.1.22/README.md cargo-0.19.0/vendor/cmake-0.1.22/README.md --- cargo-0.17.0/vendor/cmake-0.1.22/README.md 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/cmake-0.1.22/README.md 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,22 @@ +# cmake + +[![Build Status](https://travis-ci.org/alexcrichton/cmake-rs.svg?branch=master)](https://travis-ci.org/alexcrichton/cmake-rs) + +[Documentation](http://alexcrichton.com/cmake-rs) + +A build dependency for running the `cmake` build tool to compile a native +library. + +```toml +# Cargo.toml +[build-dependencies] +cmake = "0.2" +``` + +# License + +`cmake-rs` is primarily distributed under the terms of both the MIT license and +the Apache License (Version 2.0), with portions covered by various BSD-like +licenses. + +See LICENSE-APACHE, and LICENSE-MIT for details. diff -Nru cargo-0.17.0/vendor/cmake-0.1.22/src/lib.rs cargo-0.19.0/vendor/cmake-0.1.22/src/lib.rs --- cargo-0.17.0/vendor/cmake-0.1.22/src/lib.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/cmake-0.1.22/src/lib.rs 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,618 @@ +//! A build dependency for running `cmake` to build a native library +//! +//! This crate provides some necessary boilerplate and shim support for running +//! the system `cmake` command to build a native library. It will add +//! appropriate cflags for building code to link into Rust, handle cross +//! compilation, and use the necessary generator for the platform being +//! targeted. +//! +//! The builder-style configuration allows for various variables and such to be +//! passed down into the build as well. +//! +//! ## Installation +//! +//! Add this to your `Cargo.toml`: +//! +//! ```toml +//! [build-dependencies] +//! cmake = "0.1" +//! ``` +//! +//! ## Examples +//! +//! ```no_run +//! use cmake; +//! +//! // Builds the project in the directory located in `libfoo`, installing it +//! // into $OUT_DIR +//! 
let dst = cmake::build("libfoo"); +//! +//! println!("cargo:rustc-link-search=native={}", dst.display()); +//! println!("cargo:rustc-link-lib=static=foo"); +//! ``` +//! +//! ```no_run +//! use cmake::Config; +//! +//! let dst = Config::new("libfoo") +//! .define("FOO", "BAR") +//! .cflag("-foo") +//! .build(); +//! println!("cargo:rustc-link-search=native={}", dst.display()); +//! println!("cargo:rustc-link-lib=static=foo"); +//! ``` + +#![deny(missing_docs)] + +extern crate gcc; + +use std::env; +use std::ffi::{OsString, OsStr}; +use std::fs::{self, File}; +use std::io::ErrorKind; +use std::io::prelude::*; +use std::path::{Path, PathBuf}; +use std::process::Command; + +#[cfg(windows)] +mod registry; + +/// Builder style configuration for a pending CMake build. +pub struct Config { + path: PathBuf, + generator: Option, + cflags: OsString, + cxxflags: OsString, + defines: Vec<(OsString, OsString)>, + deps: Vec, + target: Option, + host: Option, + out_dir: Option, + profile: Option, + build_args: Vec, + cmake_target: Option, + env: Vec<(OsString, OsString)>, +} + +/// Builds the native library rooted at `path` with the default cmake options. +/// This will return the directory in which the library was installed. +/// +/// # Examples +/// +/// ```no_run +/// use cmake; +/// +/// // Builds the project in the directory located in `libfoo`, installing it +/// // into $OUT_DIR +/// let dst = cmake::build("libfoo"); +/// +/// println!("cargo:rustc-link-search=native={}", dst.display()); +/// println!("cargo:rustc-link-lib=static=foo"); +/// ``` +/// +pub fn build>(path: P) -> PathBuf { + Config::new(path.as_ref()).build() +} + +impl Config { + /// Creates a new blank set of configuration to build the project specified + /// at the path `path`. 
+ pub fn new>(path: P) -> Config { + Config { + path: env::current_dir().unwrap().join(path), + generator: None, + cflags: OsString::new(), + cxxflags: OsString::new(), + defines: Vec::new(), + deps: Vec::new(), + profile: None, + out_dir: None, + target: None, + host: None, + build_args: Vec::new(), + cmake_target: None, + env: Vec::new(), + } + } + + /// Sets the build-tool generator (`-G`) for this compilation. + pub fn generator>(&mut self, generator: T) -> &mut Config { + self.generator = Some(generator.as_ref().to_owned()); + self + } + + /// Adds a custom flag to pass down to the C compiler, supplementing those + /// that this library already passes. + pub fn cflag>(&mut self, flag: P) -> &mut Config { + self.cflags.push(" "); + self.cflags.push(flag.as_ref()); + self + } + + /// Adds a custom flag to pass down to the C++ compiler, supplementing those + /// that this library already passes. + pub fn cxxflag>(&mut self, flag: P) -> &mut Config { + self.cxxflags.push(" "); + self.cxxflags.push(flag.as_ref()); + self + } + + /// Adds a new `-D` flag to pass to cmake during the generation step. + pub fn define(&mut self, k: K, v: V) -> &mut Config + where K: AsRef, V: AsRef + { + self.defines.push((k.as_ref().to_owned(), v.as_ref().to_owned())); + self + } + + /// Registers a dependency for this compilation on the native library built + /// by Cargo previously. + /// + /// This registration will modify the `CMAKE_PREFIX_PATH` environment + /// variable for the build system generation step. + pub fn register_dep(&mut self, dep: &str) -> &mut Config { + self.deps.push(dep.to_string()); + self + } + + /// Sets the target triple for this compilation. + /// + /// This is automatically scraped from `$TARGET` which is set for Cargo + /// build scripts so it's not necessary to call this from a build script. + pub fn target(&mut self, target: &str) -> &mut Config { + self.target = Some(target.to_string()); + self + } + + /// Sets the host triple for this compilation. 
+ /// + /// This is automatically scraped from `$HOST` which is set for Cargo + /// build scripts so it's not necessary to call this from a build script. + pub fn host(&mut self, host: &str) -> &mut Config { + self.host = Some(host.to_string()); + self + } + + /// Sets the output directory for this compilation. + /// + /// This is automatically scraped from `$OUT_DIR` which is set for Cargo + /// build scripts so it's not necessary to call this from a build script. + pub fn out_dir>(&mut self, out: P) -> &mut Config { + self.out_dir = Some(out.as_ref().to_path_buf()); + self + } + + /// Sets the profile for this compilation. + /// + /// This is automatically scraped from `$PROFILE` which is set for Cargo + /// build scripts so it's not necessary to call this from a build script. + pub fn profile(&mut self, profile: &str) -> &mut Config { + self.profile = Some(profile.to_string()); + self + } + + /// Add an argument to the final `cmake` build step + pub fn build_arg>(&mut self, arg: A) -> &mut Config { + self.build_args.push(arg.as_ref().to_owned()); + self + } + + /// Configure an environment variable for the `cmake` processes spawned by + /// this crate in the `build` step. + pub fn env(&mut self, key: K, value: V) -> &mut Config + where K: AsRef, + V: AsRef, + { + self.env.push((key.as_ref().to_owned(), value.as_ref().to_owned())); + self + } + + /// Sets the build target for the final `cmake` build step, this will + /// default to "install" if not specified. + pub fn build_target(&mut self, target: &str) -> &mut Config { + self.cmake_target = Some(target.to_string()); + self + } + + /// Run this configuration, compiling the library with all the configured + /// options. + /// + /// This will run both the build system generator command as well as the + /// command to build the library. 
+ pub fn build(&mut self) -> PathBuf { + let target = self.target.clone().unwrap_or_else(|| { + getenv_unwrap("TARGET") + }); + let host = self.host.clone().unwrap_or_else(|| { + getenv_unwrap("HOST") + }); + let msvc = target.contains("msvc"); + let c_compiler = gcc::Config::new().cargo_metadata(false) + .opt_level(0) + .debug(false) + .target(&target) + .host(&host) + .get_compiler(); + let cxx_compiler = gcc::Config::new().cargo_metadata(false) + .cpp(true) + .opt_level(0) + .debug(false) + .target(&target) + .host(&host) + .get_compiler(); + + let dst = self.out_dir.clone().unwrap_or_else(|| { + PathBuf::from(getenv_unwrap("OUT_DIR")) + }); + let build = dst.join("build"); + self.maybe_clear(&build); + let _ = fs::create_dir(&build); + + // Add all our dependencies to our cmake paths + let mut cmake_prefix_path = Vec::new(); + for dep in &self.deps { + if let Some(root) = env::var_os(&format!("DEP_{}_ROOT", dep)) { + cmake_prefix_path.push(PathBuf::from(root)); + } + } + let system_prefix = env::var_os("CMAKE_PREFIX_PATH") + .unwrap_or(OsString::new()); + cmake_prefix_path.extend(env::split_paths(&system_prefix) + .map(|s| s.to_owned())); + let cmake_prefix_path = env::join_paths(&cmake_prefix_path).unwrap(); + + // Build up the first cmake command to build the build system. + let mut cmd = Command::new("cmake"); + cmd.arg(&self.path) + .current_dir(&build); + if target.contains("windows-gnu") { + if host.contains("windows") { + // On MinGW we need to coerce cmake to not generate a visual + // studio build system but instead use makefiles that MinGW can + // use to build. + if self.generator.is_none() { + cmd.arg("-G").arg("MSYS Makefiles"); + } + } else { + // If we're cross compiling onto windows, then set some + // variables which will hopefully get things to succeed. Some + // systems may need the `windres` or `dlltool` variables set, so + // set them if possible. 
+ if !self.defined("CMAKE_SYSTEM_NAME") { + cmd.arg("-DCMAKE_SYSTEM_NAME=Windows"); + } + if !self.defined("CMAKE_RC_COMPILER") { + let exe = find_exe(c_compiler.path()); + if let Some(name) = exe.file_name().unwrap().to_str() { + let name = name.replace("gcc", "windres"); + let windres = exe.with_file_name(name); + if windres.is_file() { + let mut arg = OsString::from("-DCMAKE_RC_COMPILER="); + arg.push(&windres); + cmd.arg(arg); + } + } + } + } + } else if msvc { + // If we're on MSVC we need to be sure to use the right generator or + // otherwise we won't get 32/64 bit correct automatically. + // This also guarantees that NMake generator isn't chosen implicitly. + if self.generator.is_none() { + cmd.arg("-G").arg(self.visual_studio_generator(&target)); + } + } + let mut is_ninja = false; + if let Some(ref generator) = self.generator { + cmd.arg("-G").arg(generator); + is_ninja = generator.to_string_lossy().contains("Ninja"); + } + let profile = self.profile.clone().unwrap_or_else(|| { + match &getenv_unwrap("PROFILE")[..] 
{ + "bench" | "release" => "Release", + // currently we need to always use the same CRT for MSVC + _ if msvc => "Release", + _ => "Debug", + }.to_string() + }); + for &(ref k, ref v) in &self.defines { + let mut os = OsString::from("-D"); + os.push(k); + os.push("="); + os.push(v); + cmd.arg(os); + } + + if !self.defined("CMAKE_INSTALL_PREFIX") { + let mut dstflag = OsString::from("-DCMAKE_INSTALL_PREFIX="); + dstflag.push(&dst); + cmd.arg(dstflag); + } + + let build_type = self.defines.iter().find(|&&(ref a, _)| { + a == "CMAKE_BUILD_TYPE" + }).map(|x| x.1.to_str().unwrap()).unwrap_or(&profile); + let build_type_upcase = build_type.chars() + .flat_map(|c| c.to_uppercase()) + .collect::(); + + { + // let cmake deal with optimization/debuginfo + let skip_arg = |arg: &OsStr| { + match arg.to_str() { + Some(s) => { + s.starts_with("-O") || s.starts_with("/O") || s == "-g" + } + None => false, + } + }; + let mut set_compiler = |kind: &str, + compiler: &gcc::Tool, + extra: &OsString| { + let flag_var = format!("CMAKE_{}_FLAGS", kind); + let tool_var = format!("CMAKE_{}_COMPILER", kind); + if !self.defined(&flag_var) { + let mut flagsflag = OsString::from("-D"); + flagsflag.push(&flag_var); + flagsflag.push("="); + flagsflag.push(extra); + for arg in compiler.args() { + if skip_arg(arg) { + continue + } + flagsflag.push(" "); + flagsflag.push(arg); + } + cmd.arg(flagsflag); + } + + // The visual studio generator apparently doesn't respect + // `CMAKE_C_FLAGS` but does respect `CMAKE_C_FLAGS_RELEASE` and + // such. We need to communicate /MD vs /MT, so set those vars + // here. + // + // Note that for other generators, though, this *overrides* + // things like the optimization flags, which is bad. 
+ if self.generator.is_none() && msvc { + let flag_var_alt = format!("CMAKE_{}_FLAGS_{}", kind, + build_type_upcase); + if !self.defined(&flag_var_alt) { + let mut flagsflag = OsString::from("-D"); + flagsflag.push(&flag_var_alt); + flagsflag.push("="); + flagsflag.push(extra); + for arg in compiler.args() { + if skip_arg(arg) { + continue + } + flagsflag.push(" "); + flagsflag.push(arg); + } + cmd.arg(flagsflag); + } + } + + // Apparently cmake likes to have an absolute path to the + // compiler as otherwise it sometimes thinks that this variable + // changed as it thinks the found compiler, /usr/bin/cc, + // differs from the specified compiler, cc. Not entirely sure + // what's up, but at least this means cmake doesn't get + // confused? + // + // Also specify this on Windows only if we use MSVC with Ninja, + // as it's not needed for MSVC with Visual Studio generators and + // for MinGW it doesn't really vary. + if !self.defined("CMAKE_TOOLCHAIN_FILE") + && !self.defined(&tool_var) + && (env::consts::FAMILY != "windows" || (msvc && is_ninja)) { + let mut ccompiler = OsString::from("-D"); + ccompiler.push(&tool_var); + ccompiler.push("="); + ccompiler.push(find_exe(compiler.path())); + #[cfg(windows)] { + // CMake doesn't like unescaped `\`s in compiler paths + // so we either have to escape them or replace with `/`s. 
+ use std::os::windows::ffi::{OsStrExt, OsStringExt}; + let wchars = ccompiler.encode_wide().map(|wchar| { + if wchar == b'\\' as u16 { '/' as u16 } else { wchar } + }).collect::>(); + ccompiler = OsString::from_wide(&wchars); + } + cmd.arg(ccompiler); + } + }; + + set_compiler("C", &c_compiler, &self.cflags); + set_compiler("CXX", &cxx_compiler, &self.cxxflags); + } + + if !self.defined("CMAKE_BUILD_TYPE") { + cmd.arg(&format!("-DCMAKE_BUILD_TYPE={}", profile)); + } + + if !self.defined("CMAKE_TOOLCHAIN_FILE") { + if let Ok(s) = env::var("CMAKE_TOOLCHAIN_FILE") { + cmd.arg(&format!("-DCMAKE_TOOLCHAIN_FILE={}", s)); + } + } + + for &(ref k, ref v) in c_compiler.env().iter().chain(&self.env) { + cmd.env(k, v); + } + + run(cmd.env("CMAKE_PREFIX_PATH", cmake_prefix_path), "cmake"); + + let mut parallel_args = Vec::new(); + if let Ok(s) = env::var("NUM_JOBS") { + match self.generator.as_ref().map(|g| g.to_string_lossy()) { + Some(ref g) if g.contains("Ninja") => { + parallel_args.push(format!("-j{}", s)); + } + Some(ref g) if g.contains("Visual Studio") => { + parallel_args.push(format!("/m:{}", s)); + } + Some(ref g) if g.contains("NMake") => { + // NMake creates `Makefile`s, but doesn't understand `-jN`. + } + _ => if fs::metadata(&dst.join("build/Makefile")).is_ok() { + // This looks like `make`, let's hope it understands `-jN`. + parallel_args.push(format!("-j{}", s)); + } + } + } + + // And build! 
+ let target = self.cmake_target.clone().unwrap_or("install".to_string()); + let mut cmd = Command::new("cmake"); + for &(ref k, ref v) in c_compiler.env().iter().chain(&self.env) { + cmd.env(k, v); + } + run(cmd.arg("--build").arg(".") + .arg("--target").arg(target) + .arg("--config").arg(&profile) + .arg("--").args(&self.build_args) + .args(¶llel_args) + .current_dir(&build), "cmake"); + + println!("cargo:root={}", dst.display()); + return dst + } + + fn visual_studio_generator(&self, target: &str) -> String { + let base = match std::env::var("VisualStudioVersion") { + Ok(version) => { + match &version[..] { + "15.0" => "Visual Studio 15", + "14.0" => "Visual Studio 14 2015", + "12.0" => "Visual Studio 12 2013", + vers => panic!("\n\n\ + unsupported or unknown VisualStudio version: {}\n\ + if another version is installed consider running \ + the appropriate vcvars script before building this \ + crate\n\ + ", vers), + } + } + _ => { + // Check for the presense of a specific registry key + // that indicates visual studio is installed. 
+ if self.has_msbuild_version("15.0") { + "Visual Studio 15" + } else if self.has_msbuild_version("14.0") { + "Visual Studio 14 2015" + } else if self.has_msbuild_version("12.0") { + "Visual Studio 12 2013" + } else { + panic!("\n\n\ + couldn't determine visual studio generator\n\ + if VisualStudio is installed, however, consider \ + running the appropriate vcvars script before building \ + this crate\n\ + "); + } + } + }; + + if target.contains("i686") { + base.to_string() + } else if target.contains("x86_64") { + format!("{} Win64", base) + } else { + panic!("unsupported msvc target: {}", target); + } + } + + #[cfg(not(windows))] + fn has_msbuild_version(&self, _version: &str) -> bool { + false + } + + #[cfg(windows)] + fn has_msbuild_version(&self, version: &str) -> bool { + let key = format!("SOFTWARE\\Microsoft\\MSBuild\\ToolsVersions\\{}", + version); + registry::LOCAL_MACHINE.open(key.as_ref()).is_ok() + } + + fn defined(&self, var: &str) -> bool { + self.defines.iter().any(|&(ref a, _)| a == var) + } + + // If a cmake project has previously been built (e.g. CMakeCache.txt already + // exists), then cmake will choke if the source directory for the original + // project being built has changed. Detect this situation through the + // `CMAKE_HOME_DIRECTORY` variable that cmake emits and if it doesn't match + // we blow away the build directory and start from scratch (the recommended + // solution apparently [1]). + // + // [1]: https://cmake.org/pipermail/cmake/2012-August/051545.html + fn maybe_clear(&self, dir: &Path) { + // CMake will apparently store canonicalized paths which normally + // isn't relevant to us but we canonicalize it here to ensure + // we're both checking the same thing. + let path = fs::canonicalize(&self.path).unwrap_or(self.path.clone()); + let mut f = match File::open(dir.join("CMakeCache.txt")) { + Ok(f) => f, + Err(..) => return, + }; + let mut u8contents = Vec::new(); + match f.read_to_end(&mut u8contents) { + Ok(f) => f, + Err(..) 
=> return, + }; + let contents = String::from_utf8_lossy(&u8contents); + drop(f); + for line in contents.lines() { + if line.starts_with("CMAKE_HOME_DIRECTORY") { + let needs_cleanup = match line.split('=').next_back() { + Some(cmake_home) => { + fs::canonicalize(cmake_home) + .ok() + .map(|cmake_home| cmake_home != path) + .unwrap_or(true) + }, + None => true + }; + if needs_cleanup { + println!("detected home dir change, cleaning out entire build \ + directory"); + fs::remove_dir_all(dir).unwrap(); + } + break + } + } + } +} + +fn run(cmd: &mut Command, program: &str) { + println!("running: {:?}", cmd); + let status = match cmd.status() { + Ok(status) => status, + Err(ref e) if e.kind() == ErrorKind::NotFound => { + fail(&format!("failed to execute command: {}\nis `{}` not installed?", + e, program)); + } + Err(e) => fail(&format!("failed to execute command: {}", e)), + }; + if !status.success() { + fail(&format!("command did not execute successfully, got: {}", status)); + } +} + +fn find_exe(path: &Path) -> PathBuf { + env::split_paths(&env::var_os("PATH").unwrap_or(OsString::new())) + .map(|p| p.join(path)) + .find(|p| fs::metadata(p).is_ok()) + .unwrap_or(path.to_owned()) +} + +fn getenv_unwrap(v: &str) -> String { + match env::var(v) { + Ok(s) => s, + Err(..) => fail(&format!("environment variable `{}` not defined", v)), + } +} + +fn fail(s: &str) -> ! { + panic!("\n{}\n\nbuild script failed, must exit now", s) +} diff -Nru cargo-0.17.0/vendor/cmake-0.1.22/src/registry.rs cargo-0.19.0/vendor/cmake-0.1.22/src/registry.rs --- cargo-0.17.0/vendor/cmake-0.1.22/src/registry.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/cmake-0.1.22/src/registry.rs 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,80 @@ +// Copyright 2015 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. 
+// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use std::ffi::OsStr; +use std::io; +use std::os::raw; +use std::os::windows::prelude::*; + +pub struct RegistryKey(Repr); + +type HKEY = *mut u8; +type DWORD = u32; +type LPCWSTR = *const u16; +type LONG = raw::c_long; +type PHKEY = *mut HKEY; +type REGSAM = u32; + +const ERROR_SUCCESS: DWORD = 0; +const HKEY_LOCAL_MACHINE: HKEY = 0x80000002 as HKEY; +const KEY_READ: DWORD = 0x20019; +const KEY_WOW64_32KEY: DWORD = 0x200; + +#[link(name = "advapi32")] +extern "system" { + fn RegOpenKeyExW(key: HKEY, + lpSubKey: LPCWSTR, + ulOptions: DWORD, + samDesired: REGSAM, + phkResult: PHKEY) -> LONG; + fn RegCloseKey(hKey: HKEY) -> LONG; +} + +struct OwnedKey(HKEY); + +enum Repr { + Const(HKEY), + Owned(OwnedKey), +} + +unsafe impl Sync for Repr {} +unsafe impl Send for Repr {} + +pub static LOCAL_MACHINE: RegistryKey = + RegistryKey(Repr::Const(HKEY_LOCAL_MACHINE)); + +impl RegistryKey { + fn raw(&self) -> HKEY { + match self.0 { + Repr::Const(val) => val, + Repr::Owned(ref val) => val.0, + } + } + + pub fn open(&self, key: &OsStr) -> io::Result { + let key = key.encode_wide().chain(Some(0)).collect::>(); + let mut ret = 0 as *mut _; + let err = unsafe { + RegOpenKeyExW(self.raw(), key.as_ptr(), 0, + KEY_READ | KEY_WOW64_32KEY, &mut ret) + }; + if err == ERROR_SUCCESS as LONG { + Ok(RegistryKey(Repr::Owned(OwnedKey(ret)))) + } else { + Err(io::Error::from_raw_os_error(err as i32)) + } + } +} + +impl Drop for OwnedKey { + fn drop(&mut self) { + unsafe { RegCloseKey(self.0); } + } +} diff -Nru cargo-0.17.0/vendor/cmake-0.1.22/.travis.yml cargo-0.19.0/vendor/cmake-0.1.22/.travis.yml --- cargo-0.17.0/vendor/cmake-0.1.22/.travis.yml 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/cmake-0.1.22/.travis.yml 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,20 @@ +language: rust 
+rust: + - stable + - beta + - nightly +sudo: false +before_script: + - pip install 'travis-cargo<0.2' --user && export PATH=$HOME/.local/bin:$PATH +script: + - cargo test --verbose + - cargo doc --no-deps +after_success: + - travis-cargo --only nightly doc-upload +env: + global: + secure: "IA467qqr1j0BpyTqG6hO8Kpt+EUDEjO1pBVhu4+L76/dygkQIwROgqdT7uXZqBPMjU6Rbi0wzGXXHJjbCWVTCjh7U/Q0bK2svtR8DKtM0o1Un/YftSUFt2p/WoiJ9PrkUjKh1rHuoyijpUqAls0JfIz8OdC45egT2SWDufljo+s=" + +notifications: + email: + on_success: never diff -Nru cargo-0.17.0/vendor/curl-0.4.1/appveyor.yml cargo-0.19.0/vendor/curl-0.4.1/appveyor.yml --- cargo-0.17.0/vendor/curl-0.4.1/appveyor.yml 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/curl-0.4.1/appveyor.yml 1970-01-01 00:00:00.000000000 +0000 @@ -1,55 +0,0 @@ -environment: - matrix: - - # Ensure MinGW works, but we need to download the 32-bit MinGW compiler from a - # custom location. - - TARGET: i686-pc-windows-gnu - MINGW_URL: https://s3.amazonaws.com/rust-lang-ci - MINGW_ARCHIVE: i686-4.9.2-release-win32-dwarf-rt_v4-rev4.7z - MINGW_DIR: mingw32 - - TARGET: x86_64-pc-windows-gnu - MSYS_BITS: 64 - - # Ensure vanilla builds work - - TARGET: i686-pc-windows-msvc - - TARGET: x86_64-pc-windows-msvc - - # Pin to specific VS versions to ensure the build works - - TARGET: x86_64-pc-windows-msvc - ARCH: amd64 - VS: C:\Program Files (x86)\Microsoft Visual Studio 12.0\VC\vcvarsall.bat - - TARGET: x86_64-pc-windows-msvc - ARCH: amd64 - VS: C:\Program Files (x86)\Microsoft Visual Studio 14.0\VC\vcvarsall.bat - -install: - # Install rust, x86_64-pc-windows-msvc host - - curl -sSf -o rustup-init.exe https://win.rustup.rs/ - - rustup-init.exe -y --default-host x86_64-pc-windows-msvc - - set PATH=%PATH%;C:\Users\appveyor\.cargo\bin - - # Install the target we're compiling for - - if NOT "%TARGET%" == "x86_64-pc-windows-msvc" rustup target add %TARGET% - - # Use the system msys if we can - - if defined MSYS_BITS set 
PATH=C:\msys64\mingw%MSYS_BITS%\bin;C:\msys64\usr\bin;%PATH% - - # download a custom compiler otherwise - - if defined MINGW_URL appveyor DownloadFile %MINGW_URL%/%MINGW_ARCHIVE% - - if defined MINGW_URL 7z x -y %MINGW_ARCHIVE% > nul - - if defined MINGW_URL set PATH=C:\Python27;%CD%\%MINGW_DIR%\bin;C:\msys64\usr\bin;%PATH% - - # If we're pinning to a specific visual studio, do so now - - if defined VS call "%VS%" %ARCH% - - # let's see what we got - - where gcc rustc cargo - - rustc -vV - - cargo -vV - - set CARGO_TARGET_DIR=%CD%\target - -build: false - -test_script: - - cargo test --target %TARGET% - - cargo run --manifest-path systest/Cargo.toml --target %TARGET% diff -Nru cargo-0.17.0/vendor/curl-0.4.1/.cargo-checksum.json cargo-0.19.0/vendor/curl-0.4.1/.cargo-checksum.json --- cargo-0.17.0/vendor/curl-0.4.1/.cargo-checksum.json 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/curl-0.4.1/.cargo-checksum.json 1970-01-01 00:00:00.000000000 +0000 @@ -1 +0,0 @@ -{"files":{".cargo-ok":"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",".gitignore":"aea7ee9b968d1bcb76f04a6a089eceb631b3af6c3d72c001e3ee5fae8052d8d6",".gitmodules":"360dc395ed93285743c292e50bc35cd0cde7882a44f663489e5fe09df2fd0322",".travis.yml":"e58bec3f2575bc2adc2d64d62c0ed5eabb733aa38bb43d2a6765a136b0c446e6","Cargo.toml":"6df3b73e2b3f7303af43bc40851b930e376fb9b8b531daf72e3268b8ac18bdf7","LICENSE":"f96def8cba2793fb8582fd12ca6d4dc0ef4ee239e8c3f80e809ec43648da6199","README.md":"80c6aaaf91a6d4d39d7ca2c2215cf114218e3d88651d110cf4e327c6d334e8b9","appveyor.yml":"714bf2b74f6cad8106d490b34785d0e6e8043fa40b5b62dbd5910eb8e68437ab","ci/.cargo/config":"0fc30f27f20cc08c09e3b3107a7b79b0beac992fe66969a744b15900ed5322ab","ci/Dockerfile-linux32":"8f4c3531a36154e2bebf045aed0b9a38bd18ce7142ca7c56ebd2fe551747a5d1","ci/Dockerfile-linux64":"4a59f30fb905e4b9de15094d3dfaa5bc2cce62db6961e6d204de31cbbb436b9a","ci/Dockerfile-linux64-curl":"b08eae75dd40b430230d8c225cde0aba88d64192f32d841d7c98648201620537
","ci/Dockerfile-mingw":"7fb3093158bbea457b5a6827f337be42b541ea97de851a80e4732b5346b76f05","ci/Dockerfile-musl":"03afb1b28326566a1ee70a66ad0dc5216e6601e642bfbdf120b6403f42786d2a","ci/run.sh":"f629282ecf67a08c09dbcf0169cadb130530f9dc5c4eefdd6d8b8d3b50f10e3a","src/easy.rs":"e954e4355f420a0b4967747d887d31aa013275b9e1f5a41ff6cbc5d7740667ef","src/error.rs":"06d9ee3cfcf382b098635200b381c742ba80d8fd1c6dfde35ad26e2126f8c8f6","src/lib.rs":"0d947b6ee281fd5805e058e3b9773193ced34a81dfd712946f91d48f26aa4d72","src/multi.rs":"70b0df92f9f6633adb27b6b2c143ce74bdb0f02f09da8ce540d586fb5474d7aa","src/panic.rs":"4373b2bedb4403fd0cf2059f925a8fdb4a3f4399e83bab817ecd8f7485004f9c","src/version.rs":"cfda858806a50a36c5e58e433a14202401c16941e8677173d964252405c6adce","tests/easy.rs":"54711c01b2fa7a39ca9d9e93047ee08c1396d84ca0f38e76be51a2e03637efd8","tests/formdata":"5891b5b522d5df086d0ff0b110fbd9d21bb4fc7163af34d08286a2e846f6be03","tests/multi.rs":"9cea679dbfc2a1152026784fe1cb641786254a303f0b3611463f4e59fde255e1","tests/post.rs":"c4a69b0dc0d5cb11988cd7e93448d45df98b86d90e3d6c7e8c30b6bebac3fd78","tests/server/mod.rs":"4d1d744586caf09a6ac43fdf4c6012c18708e7c788ccf42cbf493b1894a8d8a2"},"package":"8fd5a1fdcebdb1a59578c5583e66ffed2d13850eac4f51ff730edf6dd6111eac"} \ No newline at end of file diff -Nru cargo-0.17.0/vendor/curl-0.4.1/Cargo.toml cargo-0.19.0/vendor/curl-0.4.1/Cargo.toml --- cargo-0.17.0/vendor/curl-0.4.1/Cargo.toml 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/curl-0.4.1/Cargo.toml 1970-01-01 00:00:00.000000000 +0000 @@ -1,27 +0,0 @@ -[package] - -name = "curl" -version = "0.4.1" -authors = ["Carl Lerche ", - "Alex Crichton "] -license = "MIT" -repository = "https://github.com/carllerche/curl-rust" -description = "Rust bindings to libcurl for making HTTP requests" - -[dependencies] -libc = "0.2" -curl-sys = { path = "curl-sys", version = "0.3" } - -# Unix platforms use OpenSSL for now to provide SSL functionality -[target."cfg(all(unix, not(target_os = 
\"macos\")))".dependencies] -openssl-sys = "0.9.0" -openssl-probe = "0.1" - -[target."cfg(windows)".dependencies] -winapi = "0.2" - -[dev-dependencies] -mio = "0.6" - -[workspace] -members = ["systest"] diff -Nru cargo-0.17.0/vendor/curl-0.4.1/ci/.cargo/config cargo-0.19.0/vendor/curl-0.4.1/ci/.cargo/config --- cargo-0.17.0/vendor/curl-0.4.1/ci/.cargo/config 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/curl-0.4.1/ci/.cargo/config 1970-01-01 00:00:00.000000000 +0000 @@ -1,2 +0,0 @@ -[target.x86_64-pc-windows-gnu] -linker = "x86_64-w64-mingw32-gcc" diff -Nru cargo-0.17.0/vendor/curl-0.4.1/ci/Dockerfile-linux32 cargo-0.19.0/vendor/curl-0.4.1/ci/Dockerfile-linux32 --- cargo-0.17.0/vendor/curl-0.4.1/ci/Dockerfile-linux32 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/curl-0.4.1/ci/Dockerfile-linux32 1970-01-01 00:00:00.000000000 +0000 @@ -1,10 +0,0 @@ -FROM ubuntu:16.04 - -RUN dpkg --add-architecture i386 -RUN apt-get update -RUN apt-get install -y --no-install-recommends \ - gcc-multilib ca-certificates make libc6-dev \ - libssl-dev:i386 pkg-config - -ENV PKG_CONFIG=i686-linux-gnu-pkg-config \ - PKG_CONFIG_ALLOW_CROSS=1 diff -Nru cargo-0.17.0/vendor/curl-0.4.1/ci/Dockerfile-linux64 cargo-0.19.0/vendor/curl-0.4.1/ci/Dockerfile-linux64 --- cargo-0.17.0/vendor/curl-0.4.1/ci/Dockerfile-linux64 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/curl-0.4.1/ci/Dockerfile-linux64 1970-01-01 00:00:00.000000000 +0000 @@ -1,7 +0,0 @@ -FROM ubuntu:16.04 - -RUN apt-get update -RUN apt-get install -y --no-install-recommends \ - gcc ca-certificates make libc6-dev \ - libssl-dev \ - pkg-config diff -Nru cargo-0.17.0/vendor/curl-0.4.1/ci/Dockerfile-linux64-curl cargo-0.19.0/vendor/curl-0.4.1/ci/Dockerfile-linux64-curl --- cargo-0.17.0/vendor/curl-0.4.1/ci/Dockerfile-linux64-curl 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/curl-0.4.1/ci/Dockerfile-linux64-curl 1970-01-01 00:00:00.000000000 +0000 @@ -1,6 +0,0 @@ -FROM ubuntu:14.04 - 
-RUN apt-get update -RUN apt-get install -y --no-install-recommends \ - gcc ca-certificates make libc6-dev \ - libssl-dev libcurl4-openssl-dev pkg-config diff -Nru cargo-0.17.0/vendor/curl-0.4.1/ci/Dockerfile-mingw cargo-0.19.0/vendor/curl-0.4.1/ci/Dockerfile-mingw --- cargo-0.17.0/vendor/curl-0.4.1/ci/Dockerfile-mingw 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/curl-0.4.1/ci/Dockerfile-mingw 1970-01-01 00:00:00.000000000 +0000 @@ -1,6 +0,0 @@ -FROM ubuntu:16.04 - -RUN apt-get update -RUN apt-get install -y --no-install-recommends \ - gcc ca-certificates make libc6-dev \ - gcc-mingw-w64-x86-64 libz-mingw-w64-dev diff -Nru cargo-0.17.0/vendor/curl-0.4.1/ci/Dockerfile-musl cargo-0.19.0/vendor/curl-0.4.1/ci/Dockerfile-musl --- cargo-0.17.0/vendor/curl-0.4.1/ci/Dockerfile-musl 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/curl-0.4.1/ci/Dockerfile-musl 1970-01-01 00:00:00.000000000 +0000 @@ -1,18 +0,0 @@ -FROM ubuntu:16.04 - -RUN apt-get update -RUN apt-get install -y --no-install-recommends \ - gcc ca-certificates make libc6-dev curl \ - musl-tools - -RUN \ - curl https://www.openssl.org/source/old/1.0.2/openssl-1.0.2g.tar.gz | tar xzf - && \ - cd openssl-1.0.2g && \ - CC=musl-gcc ./Configure --prefix=/openssl no-dso linux-x86_64 -fPIC && \ - make -j10 && \ - make install && \ - cd .. 
&& \ - rm -rf openssl-1.0.2g - -ENV OPENSSL_STATIC=1 \ - OPENSSL_DIR=/openssl diff -Nru cargo-0.17.0/vendor/curl-0.4.1/ci/run.sh cargo-0.19.0/vendor/curl-0.4.1/ci/run.sh --- cargo-0.17.0/vendor/curl-0.4.1/ci/run.sh 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/curl-0.4.1/ci/run.sh 1970-01-01 00:00:00.000000000 +0000 @@ -1,11 +0,0 @@ -#!/bin/sh - -set -ex - -cargo test --target $TARGET --no-run -if [ -z "$NO_RUN" ]; then - cargo test --target $TARGET - cargo run --manifest-path systest/Cargo.toml --target $TARGET - cargo doc --no-deps - cargo doc --no-deps -p curl-sys -fi diff -Nru cargo-0.17.0/vendor/curl-0.4.1/.gitignore cargo-0.19.0/vendor/curl-0.4.1/.gitignore --- cargo-0.17.0/vendor/curl-0.4.1/.gitignore 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/curl-0.4.1/.gitignore 1970-01-01 00:00:00.000000000 +0000 @@ -1,2 +0,0 @@ -Cargo.lock -target/ diff -Nru cargo-0.17.0/vendor/curl-0.4.1/.gitmodules cargo-0.19.0/vendor/curl-0.4.1/.gitmodules --- cargo-0.17.0/vendor/curl-0.4.1/.gitmodules 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/curl-0.4.1/.gitmodules 1970-01-01 00:00:00.000000000 +0000 @@ -1,3 +0,0 @@ -[submodule "curl-sys/curl"] - path = curl-sys/curl - url = https://github.com/alexcrichton/curl diff -Nru cargo-0.17.0/vendor/curl-0.4.1/LICENSE cargo-0.19.0/vendor/curl-0.4.1/LICENSE --- cargo-0.17.0/vendor/curl-0.4.1/LICENSE 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/curl-0.4.1/LICENSE 1970-01-01 00:00:00.000000000 +0000 @@ -1,19 +0,0 @@ -Copyright (c) 2014 Carl Lerche - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - 
-The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. diff -Nru cargo-0.17.0/vendor/curl-0.4.1/README.md cargo-0.19.0/vendor/curl-0.4.1/README.md --- cargo-0.17.0/vendor/curl-0.4.1/README.md 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/curl-0.4.1/README.md 1970-01-01 00:00:00.000000000 +0000 @@ -1,137 +0,0 @@ -# curl-rust - -libcurl bindings for Rust - -[![Build Status](https://travis-ci.org/alexcrichton/curl-rust.svg?branch=master)](https://travis-ci.org/alexcrichton/curl-rust) -[![Build status](https://ci.appveyor.com/api/projects/status/lx98wtbxhhhajpr9?svg=true)](https://ci.appveyor.com/project/alexcrichton/curl-rust) - -[Documentation](http://alexcrichton.com/curl-rust) - -## Quick Start - -```rust -extern crate curl; - -use std::io::{stdout, Write}; - -use curl::easy::Easy; - -// Print a web page onto stdout -fn main() { - let mut easy = Easy::new(); - easy.url("https://www.rust-lang.org/").unwrap(); - easy.write_function(|data| { - Ok(stdout().write(data).unwrap()) - }).unwrap(); - easy.perform().unwrap(); - - println!("{}", easy.response_code().unwrap()); -} -``` - -```rust -extern crate curl; - -use curl::easy::Easy; - -// Capture output into a local `Vec`. 
-fn main() { - let mut dst = Vec::new(); - let mut easy = Easy::new(); - easy.url("https://www.rust-lang.org/").unwrap(); - - let mut transfer = easy.transfer(); - transfer.write_function(|data| { - dst.extend_from_slice(data); - Ok(data.len()) - }).unwrap(); - transfer.perform().unwrap(); -} -``` - -## Post / Put requests - -The `put` and `post` methods on `Easy` can configure the method of the HTTP -request, and then `read_function` can be used to specify how data is filled in. -This interface works particularly well with types that implement `Read`. - -```rust,no_run -extern crate curl; - -use std::io::Read; -use curl::easy::Easy; - -fn main() { - let mut data = "this is the body".as_bytes(); - - let mut easy = Easy::new(); - easy.url("http://www.example.com/upload").unwrap(); - easy.post(true).unwrap(); - easy.post_field_size(data.len() as u64).unwrap(); - - let mut transfer = easy.transfer(); - transfer.read_function(|buf| { - Ok(data.read(buf).unwrap_or(0)) - }).unwrap(); - transfer.perform().unwrap(); -} -``` - -## Custom headers - -Custom headers can be specified as part of the request: - -```rust,no_run -extern crate curl; - -use curl::easy::{Easy, List}; - -fn main() { - let mut easy = Easy::new(); - easy.url("http://www.example.com").unwrap(); - - let mut list = List::new(); - list.append("Authorization: Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ==").unwrap(); - easy.http_headers(list).unwrap(); - easy.perform().unwrap(); -} -``` - -## Keep alive - -The handle can be re-used across multiple requests. Curl will attempt to -keep the connections alive. 
- -```rust,no_run -extern crate curl; - -use curl::easy::Easy; - -fn main() { - let mut handle = Easy::new(); - - handle.url("http://www.example.com/foo").unwrap(); - handle.perform().unwrap(); - - handle.url("http://www.example.com/bar").unwrap(); - handle.perform().unwrap(); -} -``` - -## Multiple requests - -The libcurl library provides support for sending multiple requests -simultaneously through the "multi" interface. This is currently bound in the -`multi` module of this crate and provides the ability to execute multiple -transfers simultaneously. For more information, see that module. - -## Version Support - -The bindings have been developed using curl version 7.24.0. They should -work with any newer version of curl and possibly with older versions, -but this has not been tested. - -## License - -The `curl-rust` crate is licensed under the MIT license, see `LICENSE` for more -details. diff -Nru cargo-0.17.0/vendor/curl-0.4.1/src/easy.rs cargo-0.19.0/vendor/curl-0.4.1/src/easy.rs --- cargo-0.17.0/vendor/curl-0.4.1/src/easy.rs 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/curl-0.4.1/src/easy.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,3682 +0,0 @@ -//! Bindings to the "easy" libcurl API. -//! -//! This module contains some simple types like `Easy` and `List` which are just -//! wrappers around the corresponding libcurl types. There's also a few enums -//! scattered about for various options here and there. -//! -//! Most simple usage of libcurl will likely use the `Easy` structure here, and -//! you can find more docs about its usage on that struct. - -use std::cell::{RefCell, Cell}; -use std::ffi::{CString, CStr}; -use std::io::SeekFrom; -use std::path::Path; -use std::slice; -use std::str; -use std::time::Duration; - -use curl_sys; -use libc::{self, c_long, c_int, c_char, c_void, size_t, c_double, c_ulong}; - -use {Error, FormError}; -use panic; - -// TODO: checked casts everywhere - -/// Raw bindings to a libcurl "easy session". 
-/// -/// This type corresponds to the `CURL` type in libcurl, and is probably what -/// you want for just sending off a simple HTTP request and fetching a response. -/// Each easy handle can be thought of as a large builder before calling the -/// final `perform` function. -/// -/// There are many many configuration options for each `Easy` handle, and they -/// should all have their own documentation indicating what it affects and how -/// it interacts with other options. Some implementations of libcurl can use -/// this handle to interact with many different protocols, although by default -/// this crate only guarantees the HTTP/HTTPS protocols working. -/// -/// Note that almost all methods on this structure which configure various -/// properties return a `Result`. This is largely used to detect whether the -/// underlying implementation of libcurl actually implements the option being -/// requested. If you're linked to a version of libcurl which doesn't support -/// the option, then an error will be returned. Some options also perform some -/// validation when they're set, and the error is returned through this vector. -/// -/// ## Examples -/// -/// Creating a handle which can be used later -/// -/// ``` -/// use curl::easy::Easy; -/// -/// let handle = Easy::new(); -/// ``` -/// -/// Send an HTTP request, writing the response to stdout. -/// -/// ``` -/// use std::io::{stdout, Write}; -/// -/// use curl::easy::Easy; -/// -/// let mut handle = Easy::new(); -/// handle.url("https://www.rust-lang.org/").unwrap(); -/// handle.write_function(|data| { -/// Ok(stdout().write(data).unwrap()) -/// }).unwrap(); -/// handle.perform().unwrap(); -/// ``` -/// -/// Collect all output of an HTTP request to a vector. 
-/// -/// ``` -/// use curl::easy::Easy; -/// -/// let mut data = Vec::new(); -/// let mut handle = Easy::new(); -/// handle.url("https://www.rust-lang.org/").unwrap(); -/// { -/// let mut transfer = handle.transfer(); -/// transfer.write_function(|new_data| { -/// data.extend_from_slice(new_data); -/// Ok(new_data.len()) -/// }).unwrap(); -/// transfer.perform().unwrap(); -/// } -/// println!("{:?}", data); -/// ``` -/// -/// More examples of various properties of an HTTP request can be found on the -/// specific methods as well. -pub struct Easy { - handle: *mut curl_sys::CURL, - data: Box, -} - -/// A scoped transfer of information which borrows an `Easy` and allows -/// referencing stack-local data of the lifetime `'data`. -/// -/// Usage of `Easy` requires the `'static` and `Send` bounds on all callbacks -/// registered, but that's not often wanted if all you need is to collect a -/// bunch of data in memory to a vector, for example. The `Transfer` structure, -/// created by the `Easy::transfer` method, is used for this sort of request. -/// -/// The callbacks attached to a `Transfer` are only active for that one transfer -/// object, and they're allows to elide both the `Send` and `'static` bounds to -/// close over stack-local information. -pub struct Transfer<'easy, 'data> { - easy: &'easy mut Easy, - data: Box>, -} - -#[derive(Default)] -struct EasyData { - running: Cell, - write: Option Result + Send>>, - read: Option Result + Send>>, - seek: Option SeekResult + Send>>, - debug: Option>, - header: Option bool + Send>>, - progress: Option bool + Send>>, - ssl_ctx: Option Result<(), Error> + Send>>, - header_list: Option, - form: Option

, - error_buf: RefCell>, -} - -#[derive(Default)] -struct TransferData<'a> { - write: Option Result + 'a>>, - read: Option Result + 'a>>, - seek: Option SeekResult + 'a>>, - debug: Option>, - header: Option bool + 'a>>, - progress: Option bool + 'a>>, - ssl_ctx: Option Result<(), Error> + 'a>>, -} - -// libcurl guarantees that a CURL handle is fine to be transferred so long as -// it's not used concurrently, and we do that correctly ourselves. -unsafe impl Send for Easy {} - -/// Multipart/formdata for an HTTP POST request. -/// -/// This structure is built up and then passed to the `Easy::httppost` method to -/// be sent off with a request. -pub struct Form { - head: *mut curl_sys::curl_httppost, - tail: *mut curl_sys::curl_httppost, - headers: Vec, - buffers: Vec>, - strings: Vec, -} - -/// One part in a multipart upload, added to a `Form`. -pub struct Part<'form, 'data> { - form: &'form mut Form, - name: &'data str, - array: Vec, - error: Option, -} - -/// Possible proxy types that libcurl currently understands. -#[allow(missing_docs)] -pub enum ProxyType { - Http = curl_sys::CURLPROXY_HTTP as isize, - Http1 = curl_sys::CURLPROXY_HTTP_1_0 as isize, - Socks4 = curl_sys::CURLPROXY_SOCKS4 as isize, - Socks5 = curl_sys::CURLPROXY_SOCKS5 as isize, - Socks4a = curl_sys::CURLPROXY_SOCKS4A as isize, - Socks5Hostname = curl_sys::CURLPROXY_SOCKS5_HOSTNAME as isize, - - /// Hidden variant to indicate that this enum should not be matched on, it - /// may grow over time. - #[doc(hidden)] - __Nonexhaustive, -} - -/// Possible conditions for the `time_condition` method. -#[allow(missing_docs)] -pub enum TimeCondition { - None = curl_sys::CURL_TIMECOND_NONE as isize, - IfModifiedSince = curl_sys::CURL_TIMECOND_IFMODSINCE as isize, - IfUnmodifiedSince = curl_sys::CURL_TIMECOND_IFUNMODSINCE as isize, - LastModified = curl_sys::CURL_TIMECOND_LASTMOD as isize, - - /// Hidden variant to indicate that this enum should not be matched on, it - /// may grow over time. 
- #[doc(hidden)] - __Nonexhaustive, -} - -/// Possible values to pass to the `ip_resolve` method. -#[allow(missing_docs)] -pub enum IpResolve { - V4 = curl_sys::CURL_IPRESOLVE_V4 as isize, - V6 = curl_sys::CURL_IPRESOLVE_V6 as isize, - Any = curl_sys::CURL_IPRESOLVE_WHATEVER as isize, - - /// Hidden variant to indicate that this enum should not be matched on, it - /// may grow over time. - #[doc(hidden)] - __Nonexhaustive = 500, -} - -/// Possible values to pass to the `ip_resolve` method. -#[allow(missing_docs)] -pub enum SslVersion { - Default = curl_sys::CURL_SSLVERSION_DEFAULT as isize, - Tlsv1 = curl_sys::CURL_SSLVERSION_TLSv1 as isize, - Sslv2 = curl_sys::CURL_SSLVERSION_SSLv2 as isize, - Sslv3 = curl_sys::CURL_SSLVERSION_SSLv3 as isize, - // Tlsv10 = curl_sys::CURL_SSLVERSION_TLSv1_0 as isize, - // Tlsv11 = curl_sys::CURL_SSLVERSION_TLSv1_1 as isize, - // Tlsv12 = curl_sys::CURL_SSLVERSION_TLSv1_2 as isize, - - /// Hidden variant to indicate that this enum should not be matched on, it - /// may grow over time. - #[doc(hidden)] - __Nonexhaustive = 500, -} - -/// Possible return values from the `seek_function` callback. -pub enum SeekResult { - /// Indicates that the seek operation was a success - Ok = curl_sys::CURL_SEEKFUNC_OK as isize, - - /// Indicates that the seek operation failed, and the entire request should - /// fail as a result. - Fail = curl_sys::CURL_SEEKFUNC_FAIL as isize, - - /// Indicates that although the seek failed libcurl should attempt to keep - /// working if possible (for example "seek" through reading). - CantSeek = curl_sys::CURL_SEEKFUNC_CANTSEEK as isize, - - /// Hidden variant to indicate that this enum should not be matched on, it - /// may grow over time. - #[doc(hidden)] - __Nonexhaustive = 500, -} - -/// Possible data chunks that can be witnessed as part of the `debug_function` -/// callback. -pub enum InfoType { - /// The data is informational text. 
- Text, - - /// The data is header (or header-like) data received from the peer. - HeaderIn, - - /// The data is header (or header-like) data sent to the peer. - HeaderOut, - - /// The data is protocol data received from the peer. - DataIn, - - /// The data is protocol data sent to the peer. - DataOut, - - /// The data is SSL/TLS (binary) data received from the peer. - SslDataIn, - - /// The data is SSL/TLS (binary) data sent to the peer. - SslDataOut, - - /// Hidden variant to indicate that this enum should not be matched on, it - /// may grow over time. - #[doc(hidden)] - __Nonexhaustive, -} - -/// A linked list of a strings -pub struct List { - raw: *mut curl_sys::curl_slist, -} - -/// An iterator over `List` -pub struct Iter<'a> { - _me: &'a List, - cur: *mut curl_sys::curl_slist, -} - -unsafe impl Send for List {} - -/// Possible error codes that can be returned from the `read_function` callback. -pub enum ReadError { - /// Indicates that the connection should be aborted immediately - Abort, - - /// Indicates that reading should be paused until `unpause` is called. - Pause, - - /// Hidden variant to indicate that this enum should not be matched on, it - /// may grow over time. - #[doc(hidden)] - __Nonexhaustive, -} - -/// Possible error codes that can be returned from the `write_function` callback. -pub enum WriteError { - /// Indicates that reading should be paused until `unpause` is called. - Pause, - - /// Hidden variant to indicate that this enum should not be matched on, it - /// may grow over time. - #[doc(hidden)] - __Nonexhaustive, -} - -/// Structure which stores possible authentication methods to get passed to -/// `http_auth` and `proxy_auth`. -#[derive(Clone, Debug)] -pub struct Auth { - bits: c_long, -} - -impl Easy { - /// Creates a new "easy" handle which is the core of almost all operations - /// in libcurl. - /// - /// To use a handle, applications typically configure a number of options - /// followed by a call to `perform`. 
Options are preserved across calls to - /// `perform` and need to be reset manually (or via the `reset` method) if - /// this is not desired. - pub fn new() -> Easy { - ::init(); - unsafe { - let handle = curl_sys::curl_easy_init(); - assert!(!handle.is_null()); - let mut ret = Easy { - handle: handle, - data: Default::default(), - }; - default_configure(&mut ret); - return ret - } - } - - // ========================================================================= - // Behavior options - - /// Configures this handle to have verbose output to help debug protocol - /// information. - /// - /// By default output goes to stderr, but the `stderr` function on this type - /// can configure that. You can also use the `debug_function` method to get - /// all protocol data sent and received. - /// - /// By default, this option is `false`. - pub fn verbose(&mut self, verbose: bool) -> Result<(), Error> { - self.setopt_long(curl_sys::CURLOPT_VERBOSE, verbose as c_long) - } - - /// Indicates whether header information is streamed to the output body of - /// this request. - /// - /// This option is only relevant for protocols which have header metadata - /// (like http or ftp). It's not generally possible to extract headers - /// from the body if using this method, that use case should be intended for - /// the `header_function` method. - /// - /// To set HTTP headers, use the `http_header` method. - /// - /// By default, this option is `false` and corresponds to - /// `CURLOPT_HEADER`. - pub fn show_header(&mut self, show: bool) -> Result<(), Error> { - self.setopt_long(curl_sys::CURLOPT_HEADER, show as c_long) - } - - /// Indicates whether a progress meter will be shown for requests done with - /// this handle. - /// - /// This will also prevent the `progress_function` from being called. - /// - /// By default this option is `false` and corresponds to - /// `CURLOPT_NOPROGRESS`. 
- pub fn progress(&mut self, progress: bool) -> Result<(), Error> { - self.setopt_long(curl_sys::CURLOPT_NOPROGRESS, - (!progress) as c_long) - } - - /// Inform libcurl whether or not it should install signal handlers or - /// attempt to use signals to perform library functions. - /// - /// If this option is disabled then timeouts during name resolution will not - /// work unless libcurl is built against c-ares. Note that enabling this - /// option, however, may not cause libcurl to work with multiple threads. - /// - /// By default this option is `false` and corresponds to `CURLOPT_NOSIGNAL`. - /// Note that this default is **different than libcurl** as it is intended - /// that this library is threadsafe by default. See the [libcurl docs] for - /// some more information. - /// - /// [libcurl docs]: https://curl.haxx.se/libcurl/c/threadsafe.html - pub fn signal(&mut self, signal: bool) -> Result<(), Error> { - self.setopt_long(curl_sys::CURLOPT_NOSIGNAL, - (!signal) as c_long) - } - - /// Indicates whether multiple files will be transferred based on the file - /// name pattern. - /// - /// The last part of a filename uses fnmatch-like pattern matching. - /// - /// By default this option is `false` and corresponds to - /// `CURLOPT_WILDCARDMATCH`. - pub fn wildcard_match(&mut self, m: bool) -> Result<(), Error> { - self.setopt_long(curl_sys::CURLOPT_WILDCARDMATCH, m as c_long) - } - - // ========================================================================= - // Callback options - - /// Set callback for writing received data. - /// - /// This callback function gets called by libcurl as soon as there is data - /// received that needs to be saved. - /// - /// The callback function will be passed as much data as possible in all - /// invokes, but you must not make any assumptions. It may be one byte, it - /// may be thousands. 
If `show_header` is enabled, which makes header data - /// get passed to the write callback, you can get up to - /// `CURL_MAX_HTTP_HEADER` bytes of header data passed into it. This - /// usually means 100K. - /// - /// This function may be called with zero bytes data if the transferred file - /// is empty. - /// - /// The callback should return the number of bytes actually taken care of. - /// If that amount differs from the amount passed to your callback function, - /// it'll signal an error condition to the library. This will cause the - /// transfer to get aborted and the libcurl function used will return - /// an error with `is_write_error`. - /// - /// If your callback function returns `Err(WriteError::Pause)` it will cause - /// this transfer to become paused. See `unpause_write` for further details. - /// - /// By default data is sent into the void, and this corresponds to the - /// `CURLOPT_WRITEFUNCTION` and `CURLOPT_WRITEDATA` options. - /// - /// Note that the lifetime bound on this function is `'static`, but that - /// is often too restrictive. To use stack data consider calling the - /// `transfer` method and then using `write_function` to configure a - /// callback that can reference stack-local data. 
- /// - /// # Examples - /// - /// ``` - /// use std::io::{stdout, Write}; - /// use curl::easy::Easy; - /// - /// let mut handle = Easy::new(); - /// handle.url("https://www.rust-lang.org/").unwrap(); - /// handle.write_function(|data| { - /// Ok(stdout().write(data).unwrap()) - /// }).unwrap(); - /// handle.perform().unwrap(); - /// ``` - /// - /// Writing to a stack-local buffer - /// - /// ``` - /// use std::io::{stdout, Write}; - /// use curl::easy::Easy; - /// - /// let mut buf = Vec::new(); - /// let mut handle = Easy::new(); - /// handle.url("https://www.rust-lang.org/").unwrap(); - /// - /// let mut transfer = handle.transfer(); - /// transfer.write_function(|data| { - /// buf.extend_from_slice(data); - /// Ok(data.len()) - /// }).unwrap(); - /// transfer.perform().unwrap(); - /// ``` - pub fn write_function(&mut self, f: F) -> Result<(), Error> - where F: FnMut(&[u8]) -> Result + Send + 'static - { - self.data.write = Some(Box::new(f)); - unsafe { - return self.set_write_function(easy_write_cb, - &*self.data as *const _ as *mut _) - } - } - - unsafe fn set_write_function(&self, - cb: curl_sys::curl_write_callback, - ptr: *mut c_void) - -> Result<(), Error> { - try!(self.setopt_ptr(curl_sys::CURLOPT_WRITEFUNCTION, cb as *const _)); - try!(self.setopt_ptr(curl_sys::CURLOPT_WRITEDATA, ptr as *const _)); - return Ok(()); - } - - /// Read callback for data uploads. - /// - /// This callback function gets called by libcurl as soon as it needs to - /// read data in order to send it to the peer - like if you ask it to upload - /// or post data to the server. - /// - /// Your function must then return the actual number of bytes that it stored - /// in that memory area. Returning 0 will signal end-of-file to the library - /// and cause it to stop the current transfer. 
- /// - /// If you stop the current transfer by returning 0 "pre-maturely" (i.e - /// before the server expected it, like when you've said you will upload N - /// bytes and you upload less than N bytes), you may experience that the - /// server "hangs" waiting for the rest of the data that won't come. - /// - /// The read callback may return `Err(ReadError::Abort)` to stop the - /// current operation immediately, resulting in a `is_aborted_by_callback` - /// error code from the transfer. - /// - /// The callback can return `Err(ReadError::Pause)` to cause reading from - /// this connection to pause. See `unpause_read` for further details. - /// - /// By default data not input, and this corresponds to the - /// `CURLOPT_READFUNCTION` and `CURLOPT_READDATA` options. - /// - /// Note that the lifetime bound on this function is `'static`, but that - /// is often too restrictive. To use stack data consider calling the - /// `transfer` method and then using `read_function` to configure a - /// callback that can reference stack-local data. 
- /// - /// # Examples - /// - /// Read input from stdin - /// - /// ```no_run - /// use std::io::{stdin, Read}; - /// use curl::easy::Easy; - /// - /// let mut handle = Easy::new(); - /// handle.url("https://example.com/login").unwrap(); - /// handle.read_function(|into| { - /// Ok(stdin().read(into).unwrap()) - /// }).unwrap(); - /// handle.post(true).unwrap(); - /// handle.perform().unwrap(); - /// ``` - /// - /// Reading from stack-local data: - /// - /// ```no_run - /// use std::io::{stdin, Read}; - /// use curl::easy::Easy; - /// - /// let mut data_to_upload = &b"foobar"[..]; - /// let mut handle = Easy::new(); - /// handle.url("https://example.com/login").unwrap(); - /// handle.post(true).unwrap(); - /// - /// let mut transfer = handle.transfer(); - /// transfer.read_function(|into| { - /// Ok(data_to_upload.read(into).unwrap()) - /// }).unwrap(); - /// transfer.perform().unwrap(); - /// ``` - pub fn read_function(&mut self, f: F) -> Result<(), Error> - where F: FnMut(&mut [u8]) -> Result + Send + 'static - { - self.data.read = Some(Box::new(f)); - unsafe { - self.set_read_function(easy_read_cb, - &*self.data as *const _ as *mut _) - } - } - - unsafe fn set_read_function(&self, - cb: curl_sys::curl_read_callback, - ptr: *mut c_void) -> Result<(), Error> { - try!(self.setopt_ptr(curl_sys::CURLOPT_READFUNCTION, cb as *const _)); - try!(self.setopt_ptr(curl_sys::CURLOPT_READDATA, ptr as *const _)); - return Ok(()); - } - - /// User callback for seeking in input stream. - /// - /// This function gets called by libcurl to seek to a certain position in - /// the input stream and can be used to fast forward a file in a resumed - /// upload (instead of reading all uploaded bytes with the normal read - /// function/callback). It is also called to rewind a stream when data has - /// already been sent to the server and needs to be sent again. 
This may - /// happen when doing a HTTP PUT or POST with a multi-pass authentication - /// method, or when an existing HTTP connection is reused too late and the - /// server closes the connection. - /// - /// The callback function must return `SeekResult::Ok` on success, - /// `SeekResult::Fail` to cause the upload operation to fail or - /// `SeekResult::CantSeek` to indicate that while the seek failed, libcurl - /// is free to work around the problem if possible. The latter can sometimes - /// be done by instead reading from the input or similar. - /// - /// By default data this option is not set, and this corresponds to the - /// `CURLOPT_SEEKFUNCTION` and `CURLOPT_SEEKDATA` options. - /// - /// Note that the lifetime bound on this function is `'static`, but that - /// is often too restrictive. To use stack data consider calling the - /// `transfer` method and then using `seek_function` to configure a - /// callback that can reference stack-local data. - pub fn seek_function(&mut self, f: F) -> Result<(), Error> - where F: FnMut(SeekFrom) -> SeekResult + Send + 'static - { - self.data.seek = Some(Box::new(f)); - unsafe { - self.set_seek_function(easy_seek_cb, - &*self.data as *const _ as *mut _) - } - } - - unsafe fn set_seek_function(&self, - cb: curl_sys::curl_seek_callback, - ptr: *mut c_void) -> Result<(), Error> { - let cb = cb as curl_sys::curl_seek_callback; - try!(self.setopt_ptr(curl_sys::CURLOPT_SEEKFUNCTION, cb as *const _)); - try!(self.setopt_ptr(curl_sys::CURLOPT_SEEKDATA, ptr as *const _)); - Ok(()) - } - - /// Callback to progress meter function - /// - /// This function gets called by libcurl instead of its internal equivalent - /// with a frequent interval. While data is being transferred it will be - /// called very frequently, and during slow periods like when nothing is - /// being transferred it can slow down to about one call per second. 
- /// - /// The callback gets told how much data libcurl will transfer and has - /// transferred, in number of bytes. The first argument is the total number - /// of bytes libcurl expects to download in this transfer. The second - /// argument is the number of bytes downloaded so far. The third argument is - /// the total number of bytes libcurl expects to upload in this transfer. - /// The fourth argument is the number of bytes uploaded so far. - /// - /// Unknown/unused argument values passed to the callback will be set to - /// zero (like if you only download data, the upload size will remain 0). - /// Many times the callback will be called one or more times first, before - /// it knows the data sizes so a program must be made to handle that. - /// - /// Returning `false` from this callback will cause libcurl to abort the - /// transfer and return `is_aborted_by_callback`. - /// - /// If you transfer data with the multi interface, this function will not be - /// called during periods of idleness unless you call the appropriate - /// libcurl function that performs transfers. - /// - /// `noprogress` must be set to 0 to make this function actually get - /// called. - /// - /// By default this function calls an internal method and corresponds to - /// `CURLOPT_PROGRESSFUNCTION` and `CURLOPT_PROGRESSDATA`. - /// - /// Note that the lifetime bound on this function is `'static`, but that - /// is often too restrictive. To use stack data consider calling the - /// `transfer` method and then using `progress_function` to configure a - /// callback that can reference stack-local data. 
- pub fn progress_function(&mut self, f: F) -> Result<(), Error> - where F: FnMut(f64, f64, f64, f64) -> bool + Send + 'static - { - self.data.progress = Some(Box::new(f)); - unsafe { - self.set_progress_function(easy_progress_cb, - &*self.data as *const _ as *mut _) - } - } - - unsafe fn set_progress_function(&self, - cb: curl_sys::curl_progress_callback, - ptr: *mut c_void) -> Result<(), Error> { - try!(self.setopt_ptr(curl_sys::CURLOPT_PROGRESSFUNCTION, cb as *const _)); - try!(self.setopt_ptr(curl_sys::CURLOPT_PROGRESSDATA, ptr as *const _)); - Ok(()) - } - - /// Callback to SSL context - /// - /// This callback function gets called by libcurl just before the - /// initialization of an SSL connection after having processed all - /// other SSL related options to give a last chance to an - /// application to modify the behaviour of the SSL - /// initialization. The `ssl_ctx` parameter is actually a pointer - /// to the SSL library's SSL_CTX. If an error is returned from the - /// callback no attempt to establish a connection is made and the - /// perform operation will return the callback's error code. - /// - /// This function will get called on all new connections made to a - /// server, during the SSL negotiation. The SSL_CTX pointer will - /// be a new one every time. - /// - /// To use this properly, a non-trivial amount of knowledge of - /// your SSL library is necessary. For example, you can use this - /// function to call library-specific callbacks to add additional - /// validation code for certificates, and even to change the - /// actual URI of a HTTPS request. - /// - /// By default this function calls an internal method and - /// corresponds to `CURLOPT_SSL_CTX_FUNCTION` and - /// `CURLOPT_SSL_CTX_DATA`. - /// - /// Note that the lifetime bound on this function is `'static`, but that - /// is often too restrictive. 
To use stack data consider calling the - /// `transfer` method and then using `progress_function` to configure a - /// callback that can reference stack-local data. - pub fn ssl_ctx_function(&mut self, f: F) -> Result<(), Error> - where F: FnMut(*mut c_void) -> Result<(), Error> + Send + 'static - { - self.data.ssl_ctx = Some(Box::new(f)); - unsafe { - self.set_ssl_ctx_function(easy_ssl_ctx_cb, - &*self.data as *const _ as *mut _) - } - } - - unsafe fn set_ssl_ctx_function(&self, - cb: curl_sys::curl_ssl_ctx_callback, - ptr: *mut c_void) -> Result<(), Error> { - try!(self.setopt_ptr(curl_sys::CURLOPT_SSL_CTX_FUNCTION, cb as *const _)); - try!(self.setopt_ptr(curl_sys::CURLOPT_SSL_CTX_DATA, ptr as *const _)); - Ok(()) - } - - /// Specify a debug callback - /// - /// `debug_function` replaces the standard debug function used when - /// `verbose` is in effect. This callback receives debug information, - /// as specified in the type argument. - /// - /// By default this option is not set and corresponds to the - /// `CURLOPT_DEBUGFUNCTION` and `CURLOPT_DEBUGDATA` options. - /// - /// Note that the lifetime bound on this function is `'static`, but that - /// is often too restrictive. To use stack data consider calling the - /// `transfer` method and then using `debug_function` to configure a - /// callback that can reference stack-local data. 
- pub fn debug_function(&mut self, f: F) -> Result<(), Error> - where F: FnMut(InfoType, &[u8]) + Send + 'static - { - self.data.debug = Some(Box::new(f)); - unsafe { - self.set_debug_function(easy_debug_cb, - &*self.data as *const _ as *mut _) - } - } - - unsafe fn set_debug_function(&self, - cb: curl_sys::curl_debug_callback, - ptr: *mut c_void) -> Result<(), Error> { - try!(self.setopt_ptr(curl_sys::CURLOPT_DEBUGFUNCTION, cb as *const _)); - try!(self.setopt_ptr(curl_sys::CURLOPT_DEBUGDATA, ptr as *const _)); - return Ok(()); - } - - /// Callback that receives header data - /// - /// This function gets called by libcurl as soon as it has received header - /// data. The header callback will be called once for each header and only - /// complete header lines are passed on to the callback. Parsing headers is - /// very easy using this. If this callback returns `false` it'll signal an - /// error to the library. This will cause the transfer to get aborted and - /// the libcurl function in progress will return `is_write_error`. - /// - /// A complete HTTP header that is passed to this function can be up to - /// CURL_MAX_HTTP_HEADER (100K) bytes. - /// - /// It's important to note that the callback will be invoked for the headers - /// of all responses received after initiating a request and not just the - /// final response. This includes all responses which occur during - /// authentication negotiation. If you need to operate on only the headers - /// from the final response, you will need to collect headers in the - /// callback yourself and use HTTP status lines, for example, to delimit - /// response boundaries. - /// - /// When a server sends a chunked encoded transfer, it may contain a - /// trailer. That trailer is identical to a HTTP header and if such a - /// trailer is received it is passed to the application using this callback - /// as well. 
There are several ways to detect it being a trailer and not an - /// ordinary header: 1) it comes after the response-body. 2) it comes after - /// the final header line (CR LF) 3) a Trailer: header among the regular - /// response-headers mention what header(s) to expect in the trailer. - /// - /// For non-HTTP protocols like FTP, POP3, IMAP and SMTP this function will - /// get called with the server responses to the commands that libcurl sends. - /// - /// By default this option is not set and corresponds to the - /// `CURLOPT_HEADERFUNCTION` and `CURLOPT_HEADERDATA` options. - /// - /// Note that the lifetime bound on this function is `'static`, but that - /// is often too restrictive. To use stack data consider calling the - /// `transfer` method and then using `header_function` to configure a - /// callback that can reference stack-local data. - /// - /// # Examples - /// - /// ``` - /// use std::str; - /// - /// use curl::easy::Easy; - /// - /// let mut handle = Easy::new(); - /// handle.url("https://www.rust-lang.org/").unwrap(); - /// handle.header_function(|header| { - /// print!("header: {}", str::from_utf8(header).unwrap()); - /// true - /// }).unwrap(); - /// handle.perform().unwrap(); - /// ``` - /// - /// Collecting headers to a stack local vector - /// - /// ``` - /// use std::str; - /// - /// use curl::easy::Easy; - /// - /// let mut headers = Vec::new(); - /// let mut handle = Easy::new(); - /// handle.url("https://www.rust-lang.org/").unwrap(); - /// - /// { - /// let mut transfer = handle.transfer(); - /// transfer.header_function(|header| { - /// headers.push(str::from_utf8(header).unwrap().to_string()); - /// true - /// }).unwrap(); - /// transfer.perform().unwrap(); - /// } - /// - /// println!("{:?}", headers); - /// ``` - pub fn header_function(&mut self, f: F) -> Result<(), Error> - where F: FnMut(&[u8]) -> bool + Send + 'static - { - self.data.header = Some(Box::new(f)); - unsafe { - self.set_header_function(easy_header_cb, - &*self.data 
as *const _ as *mut _) - } - } - - // TODO: shouldn't there be a libcurl typedef for this? - unsafe fn set_header_function(&self, - cb: extern fn(*mut c_char, - size_t, - size_t, - *mut c_void) -> size_t, - ptr: *mut c_void) -> Result<(), Error> { - try!(self.setopt_ptr(curl_sys::CURLOPT_HEADERFUNCTION, cb as *const _)); - try!(self.setopt_ptr(curl_sys::CURLOPT_HEADERDATA, ptr as *const _)); - Ok(()) - } - - // ========================================================================= - // Error options - - // TODO: error buffer and stderr - - /// Indicates whether this library will fail on HTTP response codes >= 400. - /// - /// This method is not fail-safe especially when authentication is involved. - /// - /// By default this option is `false` and corresponds to - /// `CURLOPT_FAILONERROR`. - pub fn fail_on_error(&mut self, fail: bool) -> Result<(), Error> { - self.setopt_long(curl_sys::CURLOPT_FAILONERROR, fail as c_long) - } - - // ========================================================================= - // Network options - - /// Provides the URL which this handle will work with. - /// - /// The string provided must be URL-encoded with the format: - /// - /// ```text - /// scheme://host:port/path - /// ``` - /// - /// The syntax is not validated as part of this function and that is - /// deferred until later. - /// - /// By default this option is not set and `perform` will not work until it - /// is set. This option corresponds to `CURLOPT_URL`. - pub fn url(&mut self, url: &str) -> Result<(), Error> { - let url = try!(CString::new(url)); - self.setopt_str(curl_sys::CURLOPT_URL, &url) - } - - /// Configures the port number to connect to, instead of the one specified - /// in the URL or the default of the protocol. - pub fn port(&mut self, port: u16) -> Result<(), Error> { - self.setopt_long(curl_sys::CURLOPT_PORT, port as c_long) - } - - // /// Indicates whether sequences of `/../` and `/./` will be squashed or not. 
- // /// - // /// By default this option is `false` and corresponds to - // /// `CURLOPT_PATH_AS_IS`. - // pub fn path_as_is(&mut self, as_is: bool) -> Result<(), Error> { - // } - - /// Provide the URL of a proxy to use. - /// - /// By default this option is not set and corresponds to `CURLOPT_PROXY`. - pub fn proxy(&mut self, url: &str) -> Result<(), Error> { - let url = try!(CString::new(url)); - self.setopt_str(curl_sys::CURLOPT_PROXY, &url) - } - - /// Provide port number the proxy is listening on. - /// - /// By default this option is not set (the default port for the proxy - /// protocol is used) and corresponds to `CURLOPT_PROXYPORT`. - pub fn proxy_port(&mut self, port: u16) -> Result<(), Error> { - self.setopt_long(curl_sys::CURLOPT_PROXYPORT, port as c_long) - } - - /// Indicates the type of proxy being used. - /// - /// By default this option is `ProxyType::Http` and corresponds to - /// `CURLOPT_PROXYTYPE`. - pub fn proxy_type(&mut self, kind: ProxyType) -> Result<(), Error> { - self.setopt_long(curl_sys::CURLOPT_PROXYTYPE, kind as c_long) - } - - /// Provide a list of hosts that should not be proxied to. - /// - /// This string is a comma-separated list of hosts which should not use the - /// proxy specified for connections. A single `*` character is also accepted - /// as a wildcard for all hosts. - /// - /// By default this option is not set and corresponds to - /// `CURLOPT_NOPROXY`. - pub fn noproxy(&mut self, skip: &str) -> Result<(), Error> { - let skip = try!(CString::new(skip)); - self.setopt_str(curl_sys::CURLOPT_PROXYTYPE, &skip) - } - - /// Inform curl whether it should tunnel all operations through the proxy. - /// - /// This essentially means that a `CONNECT` is sent to the proxy for all - /// outbound requests. - /// - /// By default this option is `false` and corresponds to - /// `CURLOPT_HTTPPROXYTUNNEL`. 
- pub fn http_proxy_tunnel(&mut self, tunnel: bool) -> Result<(), Error> { - self.setopt_long(curl_sys::CURLOPT_HTTPPROXYTUNNEL, - tunnel as c_long) - } - - /// Tell curl which interface to bind to for an outgoing network interface. - /// - /// The interface name, IP address, or host name can be specified here. - /// - /// By default this option is not set and corresponds to - /// `CURLOPT_INTERFACE`. - pub fn interface(&mut self, interface: &str) -> Result<(), Error> { - let s = try!(CString::new(interface)); - self.setopt_str(curl_sys::CURLOPT_INTERFACE, &s) - } - - /// Indicate which port should be bound to locally for this connection. - /// - /// By default this option is 0 (any port) and corresponds to - /// `CURLOPT_LOCALPORT`. - pub fn set_local_port(&mut self, port: u16) -> Result<(), Error> { - self.setopt_long(curl_sys::CURLOPT_LOCALPORT, port as c_long) - } - - /// Indicates the number of attempts libcurl will perform to find a working - /// port number. - /// - /// By default this option is 1 and corresponds to - /// `CURLOPT_LOCALPORTRANGE`. - pub fn local_port_range(&mut self, range: u16) -> Result<(), Error> { - self.setopt_long(curl_sys::CURLOPT_LOCALPORTRANGE, - range as c_long) - } - - /// Sets the timeout of how long name resolves will be kept in memory. - /// - /// This is distinct from DNS TTL options and is entirely speculative. - /// - /// By default this option is 60s and corresponds to - /// `CURLOPT_DNS_CACHE_TIMEOUT`. - pub fn dns_cache_timeout(&mut self, dur: Duration) -> Result<(), Error> { - self.setopt_long(curl_sys::CURLOPT_DNS_CACHE_TIMEOUT, - dur.as_secs() as c_long) - } - - /// Specify the preferred receive buffer size, in bytes. - /// - /// This is treated as a request, not an order, and the main point of this - /// is that the write callback may get called more often with smaller - /// chunks. - /// - /// By default this option is the maximum write size and corresopnds to - /// `CURLOPT_BUFFERSIZE`. 
- pub fn buffer_size(&mut self, size: usize) -> Result<(), Error> { - self.setopt_long(curl_sys::CURLOPT_BUFFERSIZE, size as c_long) - } - - // /// Enable or disable TCP Fast Open - // /// - // /// By default this options defaults to `false` and corresponds to - // /// `CURLOPT_TCP_FASTOPEN` - // pub fn fast_open(&mut self, enable: bool) -> Result<(), Error> { - // } - - /// Configures whether the TCP_NODELAY option is set, or Nagle's algorithm - /// is disabled. - /// - /// The purpose of Nagle's algorithm is to minimize the number of small - /// packet's on the network, and disabling this may be less efficient in - /// some situations. - /// - /// By default this option is `false` and corresponds to - /// `CURLOPT_TCP_NODELAY`. - pub fn tcp_nodelay(&mut self, enable: bool) -> Result<(), Error> { - self.setopt_long(curl_sys::CURLOPT_TCP_NODELAY, enable as c_long) - } - - // /// Configures whether TCP keepalive probes will be sent. - // /// - // /// The delay and frequency of these probes is controlled by `tcp_keepidle` - // /// and `tcp_keepintvl`. - // /// - // /// By default this option is `false` and corresponds to - // /// `CURLOPT_TCP_KEEPALIVE`. - // pub fn tcp_keepalive(&mut self, enable: bool) -> Result<(), Error> { - // self.setopt_long(curl_sys::CURLOPT_TCP_KEEPALIVE, enable as c_long) - // } - - // /// Configures the TCP keepalive idle time wait. - // /// - // /// This is the delay, after which the connection is idle, keepalive probes - // /// will be sent. Not all operating systems support this. - // /// - // /// By default this corresponds to `CURLOPT_TCP_KEEPIDLE`. - // pub fn tcp_keepidle(&mut self, amt: Duration) -> Result<(), Error> { - // self.setopt_long(curl_sys::CURLOPT_TCP_KEEPIDLE, - // amt.as_secs() as c_long) - // } - // - // /// Configures the delay between keepalive probes. - // /// - // /// By default this corresponds to `CURLOPT_TCP_KEEPINTVL`. 
- // pub fn tcp_keepintvl(&mut self, amt: Duration) -> Result<(), Error> { - // self.setopt_long(curl_sys::CURLOPT_TCP_KEEPINTVL, - // amt.as_secs() as c_long) - // } - - /// Configures the scope for local IPv6 addresses. - /// - /// Sets the scope_id value to use when connecting to IPv6 or link-local - /// addresses. - /// - /// By default this value is 0 and corresponds to `CURLOPT_ADDRESS_SCOPE` - pub fn address_scope(&mut self, scope: u32) -> Result<(), Error> { - self.setopt_long(curl_sys::CURLOPT_ADDRESS_SCOPE, - scope as c_long) - } - - // ========================================================================= - // Names and passwords - - /// Configures the username to pass as authentication for this connection. - /// - /// By default this value is not set and corresponds to `CURLOPT_USERNAME`. - pub fn username(&mut self, user: &str) -> Result<(), Error> { - let user = try!(CString::new(user)); - self.setopt_str(curl_sys::CURLOPT_USERNAME, &user) - } - - /// Configures the password to pass as authentication for this connection. - /// - /// By default this value is not set and corresponds to `CURLOPT_PASSWORD`. - pub fn password(&mut self, pass: &str) -> Result<(), Error> { - let pass = try!(CString::new(pass)); - self.setopt_str(curl_sys::CURLOPT_PASSWORD, &pass) - } - - /// Set HTTP server authentication methods to try - /// - /// If more than one method is set, libcurl will first query the site to see - /// which authentication methods it supports and then pick the best one you - /// allow it to use. For some methods, this will induce an extra network - /// round-trip. Set the actual name and password with the `password` and - /// `username` methods. - /// - /// For authentication with a proxy, see `proxy_auth`. - /// - /// By default this value is basic and corresponds to `CURLOPT_HTTPAUTH`. 
- pub fn http_auth(&mut self, auth: &Auth) -> Result<(), Error> { - self.setopt_long(curl_sys::CURLOPT_HTTPAUTH, auth.bits) - } - - /// Configures the proxy username to pass as authentication for this - /// connection. - /// - /// By default this value is not set and corresponds to - /// `CURLOPT_PROXYUSERNAME`. - pub fn proxy_username(&mut self, user: &str) -> Result<(), Error> { - let user = try!(CString::new(user)); - self.setopt_str(curl_sys::CURLOPT_PROXYUSERNAME, &user) - } - - /// Configures the proxy password to pass as authentication for this - /// connection. - /// - /// By default this value is not set and corresponds to - /// `CURLOPT_PROXYPASSWORD`. - pub fn proxy_password(&mut self, pass: &str) -> Result<(), Error> { - let pass = try!(CString::new(pass)); - self.setopt_str(curl_sys::CURLOPT_PROXYPASSWORD, &pass) - } - - /// Set HTTP proxy authentication methods to try - /// - /// If more than one method is set, libcurl will first query the site to see - /// which authentication methods it supports and then pick the best one you - /// allow it to use. For some methods, this will induce an extra network - /// round-trip. Set the actual name and password with the `proxy_password` - /// and `proxy_username` methods. - /// - /// By default this value is basic and corresponds to `CURLOPT_PROXYAUTH`. - pub fn proxy_auth(&mut self, auth: &Auth) -> Result<(), Error> { - self.setopt_long(curl_sys::CURLOPT_PROXYAUTH, auth.bits) - } - - // ========================================================================= - // HTTP Options - - /// Indicates whether the referer header is automatically updated - /// - /// By default this option is `false` and corresponds to - /// `CURLOPT_AUTOREFERER`. - pub fn autoreferer(&mut self, enable: bool) -> Result<(), Error> { - self.setopt_long(curl_sys::CURLOPT_AUTOREFERER, enable as c_long) - } - - /// Enables automatic decompression of HTTP downloads. 
- /// - /// Sets the contents of the Accept-Encoding header sent in an HTTP request. - /// This enables decoding of a response with Content-Encoding. - /// - /// Currently supported encoding are `identity`, `zlib`, and `gzip`. A - /// zero-length string passed in will send all accepted encodings. - /// - /// By default this option is not set and corresponds to - /// `CURLOPT_ACCEPT_ENCODING`. - pub fn accept_encoding(&mut self, encoding: &str) -> Result<(), Error> { - let encoding = try!(CString::new(encoding)); - self.setopt_str(curl_sys::CURLOPT_ACCEPT_ENCODING, &encoding) - } - - /// Request the HTTP Transfer Encoding. - /// - /// By default this option is `false` and corresponds to - /// `CURLOPT_TRANSFER_ENCODING`. - pub fn transfer_encoding(&mut self, enable: bool) -> Result<(), Error> { - self.setopt_long(curl_sys::CURLOPT_TRANSFER_ENCODING, enable as c_long) - } - - /// Follow HTTP 3xx redirects. - /// - /// Indicates whether any `Location` headers in the response should get - /// followed. - /// - /// By default this option is `false` and corresponds to - /// `CURLOPT_FOLLOWLOCATION`. - pub fn follow_location(&mut self, enable: bool) -> Result<(), Error> { - self.setopt_long(curl_sys::CURLOPT_FOLLOWLOCATION, enable as c_long) - } - - /// Send credentials to hosts other than the first as well. - /// - /// Sends username/password credentials even when the host changes as part - /// of a redirect. - /// - /// By default this option is `false` and corresponds to - /// `CURLOPT_UNRESTRICTED_AUTH`. - pub fn unrestricted_auth(&mut self, enable: bool) -> Result<(), Error> { - self.setopt_long(curl_sys::CURLOPT_UNRESTRICTED_AUTH, enable as c_long) - } - - /// Set the maximum number of redirects allowed. - /// - /// A value of 0 will refuse any redirect. - /// - /// By default this option is `-1` (unlimited) and corresponds to - /// `CURLOPT_MAXREDIRS`. 
- pub fn max_redirections(&mut self, max: u32) -> Result<(), Error> { - self.setopt_long(curl_sys::CURLOPT_MAXREDIRS, max as c_long) - } - - // TODO: post_redirections - - /// Make an HTTP PUT request. - /// - /// By default this option is `false` and corresponds to `CURLOPT_PUT`. - pub fn put(&mut self, enable: bool) -> Result<(), Error> { - self.setopt_long(curl_sys::CURLOPT_PUT, enable as c_long) - } - - /// Make an HTTP POST request. - /// - /// This will also make the library use the - /// `Content-Type: application/x-www-form-urlencoded` header. - /// - /// POST data can be specified through `post_fields` or by specifying a read - /// function. - /// - /// By default this option is `false` and corresponds to `CURLOPT_POST`. - pub fn post(&mut self, enable: bool) -> Result<(), Error> { - self.setopt_long(curl_sys::CURLOPT_POST, enable as c_long) - } - - /// Configures the data that will be uploaded as part of a POST. - /// - /// Note that the data is copied into this handle and if that's not desired - /// then the read callbacks can be used instead. - /// - /// By default this option is not set and corresponds to - /// `CURLOPT_COPYPOSTFIELDS`. - pub fn post_fields_copy(&mut self, data: &[u8]) -> Result<(), Error> { - // Set the length before the pointer so libcurl knows how much to read - try!(self.post_field_size(data.len() as u64)); - self.setopt_ptr(curl_sys::CURLOPT_COPYPOSTFIELDS, - data.as_ptr() as *const _) - } - - /// Configures the size of data that's going to be uploaded as part of a - /// POST operation. - /// - /// This is called automaticsally as part of `post_fields` and should only - /// be called if data is being provided in a read callback (and even then - /// it's optional). - /// - /// By default this option is not set and corresponds to - /// `CURLOPT_POSTFIELDSIZE_LARGE`. 
- pub fn post_field_size(&mut self, size: u64) -> Result<(), Error> { - // Clear anything previous to ensure we don't read past a buffer - try!(self.setopt_ptr(curl_sys::CURLOPT_POSTFIELDS, 0 as *const _)); - self.setopt_off_t(curl_sys::CURLOPT_POSTFIELDSIZE_LARGE, - size as curl_sys::curl_off_t) - } - - /// Tells libcurl you want a multipart/formdata HTTP POST to be made and you - /// instruct what data to pass on to the server in the `form` argument. - /// - /// By default this option is set to null and corresponds to - /// `CURLOPT_HTTPPOST`. - pub fn httppost(&mut self, form: Form) -> Result<(), Error> { - try!(self.setopt_ptr(curl_sys::CURLOPT_HTTPPOST, - form.head as *const _)); - self.data.form = Some(form); - Ok(()) - } - - /// Sets the HTTP referer header - /// - /// By default this option is not set and corresponds to `CURLOPT_REFERER`. - pub fn referer(&mut self, referer: &str) -> Result<(), Error> { - let referer = try!(CString::new(referer)); - self.setopt_str(curl_sys::CURLOPT_REFERER, &referer) - } - - /// Sets the HTTP user-agent header - /// - /// By default this option is not set and corresponds to - /// `CURLOPT_USERAGENT`. - pub fn useragent(&mut self, useragent: &str) -> Result<(), Error> { - let useragent = try!(CString::new(useragent)); - self.setopt_str(curl_sys::CURLOPT_USERAGENT, &useragent) - } - - /// Add some headers to this HTTP request. - /// - /// If you add a header that is otherwise used internally, the value here - /// takes precedence. If a header is added with no content (like `Accept:`) - /// the internally the header will get disabled. To add a header with no - /// content, use the form `MyHeader;` (not the trailing semicolon). - /// - /// Headers must not be CRLF terminated. Many replaced headers have common - /// shortcuts which should be prefered. 
- /// - /// By default this option is not set and corresponds to - /// `CURLOPT_HTTPHEADER` - /// - /// # Examples - /// - /// ``` - /// use curl::easy::{Easy, List}; - /// - /// let mut list = List::new(); - /// list.append("Foo: bar").unwrap(); - /// list.append("Bar: baz").unwrap(); - /// - /// let mut handle = Easy::new(); - /// handle.url("https://www.rust-lang.org/").unwrap(); - /// handle.http_headers(list).unwrap(); - /// handle.perform().unwrap(); - /// ``` - pub fn http_headers(&mut self, list: List) -> Result<(), Error> { - let ptr = list.raw; - self.data.header_list = Some(list); - self.setopt_ptr(curl_sys::CURLOPT_HTTPHEADER, ptr as *const _) - } - - // /// Add some headers to send to the HTTP proxy. - // /// - // /// This function is essentially the same as `http_headers`. - // /// - // /// By default this option is not set and corresponds to - // /// `CURLOPT_PROXYHEADER` - // pub fn proxy_headers(&mut self, list: &'a List) -> Result<(), Error> { - // self.setopt_ptr(curl_sys::CURLOPT_PROXYHEADER, list.raw as *const _) - // } - - /// Set the contents of the HTTP Cookie header. - /// - /// Pass a string of the form `name=contents` for one cookie value or - /// `name1=val1; name2=val2` for multiple values. - /// - /// Using this option multiple times will only make the latest string - /// override the previous ones. This option will not enable the cookie - /// engine, use `cookie_file` or `cookie_jar` to do that. - /// - /// By default this option is not set and corresponds to `CURLOPT_COOKIE`. - pub fn cookie(&mut self, cookie: &str) -> Result<(), Error> { - let cookie = try!(CString::new(cookie)); - self.setopt_str(curl_sys::CURLOPT_COOKIE, &cookie) - } - - /// Set the file name to read cookies from. - /// - /// The cookie data can be in either the old Netscape / Mozilla cookie data - /// format or just regular HTTP headers (Set-Cookie style) dumped to a file. 
- /// - /// This also enables the cookie engine, making libcurl parse and send - /// cookies on subsequent requests with this handle. - /// - /// Given an empty or non-existing file or by passing the empty string ("") - /// to this option, you can enable the cookie engine without reading any - /// initial cookies. - /// - /// If you use this option multiple times, you just add more files to read. - /// Subsequent files will add more cookies. - /// - /// By default this option is not set and corresponds to - /// `CURLOPT_COOKIEFILE`. - pub fn cookie_file>(&mut self, file: P) -> Result<(), Error> { - self.setopt_path(curl_sys::CURLOPT_COOKIEFILE, file.as_ref()) - } - - /// Set the file name to store cookies to. - /// - /// This will make libcurl write all internally known cookies to the file - /// when this handle is dropped. If no cookies are known, no file will be - /// created. Specify "-" as filename to instead have the cookies written to - /// stdout. Using this option also enables cookies for this session, so if - /// you for example follow a location it will make matching cookies get sent - /// accordingly. - /// - /// Note that libcurl doesn't read any cookies from the cookie jar. If you - /// want to read cookies from a file, use `cookie_file`. - /// - /// By default this option is not set and corresponds to - /// `CURLOPT_COOKIEJAR`. - pub fn cookie_jar>(&mut self, file: P) -> Result<(), Error> { - self.setopt_path(curl_sys::CURLOPT_COOKIEJAR, file.as_ref()) - } - - /// Start a new cookie session - /// - /// Marks this as a new cookie "session". It will force libcurl to ignore - /// all cookies it is about to load that are "session cookies" from the - /// previous session. By default, libcurl always stores and loads all - /// cookies, independent if they are session cookies or not. Session cookies - /// are cookies without expiry date and they are meant to be alive and - /// existing for this "session" only. 
- /// - /// By default this option is `false` and corresponds to - /// `CURLOPT_COOKIESESSION`. - pub fn cookie_session(&mut self, session: bool) -> Result<(), Error> { - self.setopt_long(curl_sys::CURLOPT_COOKIESESSION, session as c_long) - } - - /// Add to or manipulate cookies held in memory. - /// - /// Such a cookie can be either a single line in Netscape / Mozilla format - /// or just regular HTTP-style header (Set-Cookie: ...) format. This will - /// also enable the cookie engine. This adds that single cookie to the - /// internal cookie store. - /// - /// Exercise caution if you are using this option and multiple transfers may - /// occur. If you use the Set-Cookie format and don't specify a domain then - /// the cookie is sent for any domain (even after redirects are followed) - /// and cannot be modified by a server-set cookie. If a server sets a cookie - /// of the same name (or maybe you've imported one) then both will be sent - /// on a future transfer to that server, likely not what you intended. - /// address these issues set a domain in Set-Cookie or use the Netscape - /// format. - /// - /// Additionally, there are commands available that perform actions if you - /// pass in these exact strings: - /// - /// * "ALL" - erases all cookies held in memory - /// * "SESS" - erases all session cookies held in memory - /// * "FLUSH" - write all known cookies to the specified cookie jar - /// * "RELOAD" - reread all cookies from the cookie file - /// - /// By default this options corresponds to `CURLOPT_COOKIELIST` - pub fn cookie_list(&mut self, cookie: &str) -> Result<(), Error> { - let cookie = try!(CString::new(cookie)); - self.setopt_str(curl_sys::CURLOPT_COOKIELIST, &cookie) - } - - /// Ask for a HTTP GET request. - /// - /// By default this option is `false` and corresponds to `CURLOPT_HTTPGET`. 
- pub fn get(&mut self, enable: bool) -> Result<(), Error> { - self.setopt_long(curl_sys::CURLOPT_HTTPGET, enable as c_long) - } - - // /// Ask for a HTTP GET request. - // /// - // /// By default this option is `false` and corresponds to `CURLOPT_HTTPGET`. - // pub fn http_version(&mut self, vers: &str) -> Result<(), Error> { - // self.setopt_long(curl_sys::CURLOPT_HTTPGET, enable as c_long) - // } - - /// Ignore the content-length header. - /// - /// By default this option is `false` and corresponds to - /// `CURLOPT_IGNORE_CONTENT_LENGTH`. - pub fn ignore_content_length(&mut self, ignore: bool) -> Result<(), Error> { - self.setopt_long(curl_sys::CURLOPT_IGNORE_CONTENT_LENGTH, - ignore as c_long) - } - - /// Enable or disable HTTP content decoding. - /// - /// By default this option is `true` and corresponds to - /// `CURLOPT_HTTP_CONTENT_DECODING`. - pub fn http_content_decoding(&mut self, enable: bool) -> Result<(), Error> { - self.setopt_long(curl_sys::CURLOPT_HTTP_CONTENT_DECODING, - enable as c_long) - } - - /// Enable or disable HTTP transfer decoding. - /// - /// By default this option is `true` and corresponds to - /// `CURLOPT_HTTP_TRANSFER_DECODING`. - pub fn http_transfer_decoding(&mut self, enable: bool) -> Result<(), Error> { - self.setopt_long(curl_sys::CURLOPT_HTTP_TRANSFER_DECODING, - enable as c_long) - } - - // /// Timeout for the Expect: 100-continue response - // /// - // /// By default this option is 1s and corresponds to - // /// `CURLOPT_EXPECT_100_TIMEOUT_MS`. - // pub fn expect_100_timeout(&mut self, enable: bool) -> Result<(), Error> { - // self.setopt_long(curl_sys::CURLOPT_HTTP_TRANSFER_DECODING, - // enable as c_long) - // } - - // /// Wait for pipelining/multiplexing. - // /// - // /// Tells libcurl to prefer to wait for a connection to confirm or deny that - // /// it can do pipelining or multiplexing before continuing. 
- // /// - // /// When about to perform a new transfer that allows pipelining or - // /// multiplexing, libcurl will check for existing connections to re-use and - // /// pipeline on. If no such connection exists it will immediately continue - // /// and create a fresh new connection to use. - // /// - // /// By setting this option to `true` - having `pipeline` enabled for the - // /// multi handle this transfer is associated with - libcurl will instead - // /// wait for the connection to reveal if it is possible to - // /// pipeline/multiplex on before it continues. This enables libcurl to much - // /// better keep the number of connections to a minimum when using pipelining - // /// or multiplexing protocols. - // /// - // /// The effect thus becomes that with this option set, libcurl prefers to - // /// wait and re-use an existing connection for pipelining rather than the - // /// opposite: prefer to open a new connection rather than waiting. - // /// - // /// The waiting time is as long as it takes for the connection to get up and - // /// for libcurl to get the necessary response back that informs it about its - // /// protocol and support level. - // pub fn http_pipewait(&mut self, enable: bool) -> Result<(), Error> { - // } - - - // ========================================================================= - // Protocol Options - - /// Indicates the range that this request should retrieve. - /// - /// The string provided should be of the form `N-M` where either `N` or `M` - /// can be left out. For HTTP transfers multiple ranges separated by commas - /// are also accepted. - /// - /// By default this option is not set and corresponds to `CURLOPT_RANGE`. - pub fn range(&mut self, range: &str) -> Result<(), Error> { - let range = try!(CString::new(range)); - self.setopt_str(curl_sys::CURLOPT_RANGE, &range) - } - - /// Set a point to resume transfer from - /// - /// Specify the offset in bytes you want the transfer to start from. 
- /// - /// By default this option is 0 and corresponds to - /// `CURLOPT_RESUME_FROM_LARGE`. - pub fn resume_from(&mut self, from: u64) -> Result<(), Error> { - self.setopt_off_t(curl_sys::CURLOPT_RESUME_FROM_LARGE, - from as curl_sys::curl_off_t) - } - - /// Set a custom request string - /// - /// Specifies that a custom request will be made (e.g. a custom HTTP - /// method). This does not change how libcurl performs internally, just - /// changes the string sent to the server. - /// - /// By default this option is not set and corresponds to - /// `CURLOPT_CUSTOMREQUEST`. - pub fn custom_request(&mut self, request: &str) -> Result<(), Error> { - let request = try!(CString::new(request)); - self.setopt_str(curl_sys::CURLOPT_CUSTOMREQUEST, &request) - } - - /// Get the modification time of the remote resource - /// - /// If true, libcurl will attempt to get the modification time of the - /// remote document in this operation. This requires that the remote server - /// sends the time or replies to a time querying command. The `filetime` - /// function can be used after a transfer to extract the received time (if - /// any). - /// - /// By default this option is `false` and corresponds to `CURLOPT_FILETIME` - pub fn fetch_filetime(&mut self, fetch: bool) -> Result<(), Error> { - self.setopt_long(curl_sys::CURLOPT_FILETIME, fetch as c_long) - } - - /// Indicate whether to download the request without getting the body - /// - /// This is useful, for example, for doing a HEAD request. - /// - /// By default this option is `false` and corresponds to `CURLOPT_NOBODY`. - pub fn nobody(&mut self, enable: bool) -> Result<(), Error> { - self.setopt_long(curl_sys::CURLOPT_NOBODY, enable as c_long) - } - - /// Set the size of the input file to send off. - /// - /// By default this option is not set and corresponds to - /// `CURLOPT_INFILESIZE_LARGE`. 
- pub fn in_filesize(&mut self, size: u64) -> Result<(), Error> { - self.setopt_off_t(curl_sys::CURLOPT_INFILESIZE_LARGE, - size as curl_sys::curl_off_t) - } - - /// Enable or disable data upload. - /// - /// This means that a PUT request will be made for HTTP and probably wants - /// to be combined with the read callback as well as the `in_filesize` - /// method. - /// - /// By default this option is `false` and corresponds to `CURLOPT_UPLOAD`. - pub fn upload(&mut self, enable: bool) -> Result<(), Error> { - self.setopt_long(curl_sys::CURLOPT_UPLOAD, enable as c_long) - } - - /// Configure the maximum file size to download. - /// - /// By default this option is not set and corresponds to - /// `CURLOPT_MAXFILESIZE_LARGE`. - pub fn max_filesize(&mut self, size: u64) -> Result<(), Error> { - self.setopt_off_t(curl_sys::CURLOPT_MAXFILESIZE_LARGE, - size as curl_sys::curl_off_t) - } - - /// Selects a condition for a time request. - /// - /// This value indicates how the `time_value` option is interpreted. - /// - /// By default this option is not set and corresponds to - /// `CURLOPT_TIMECONDITION`. - pub fn time_condition(&mut self, cond: TimeCondition) -> Result<(), Error> { - self.setopt_long(curl_sys::CURLOPT_TIMECONDITION, cond as c_long) - } - - /// Sets the time value for a conditional request. - /// - /// The value here should be the number of seconds elapsed since January 1, - /// 1970. To pass how to interpret this value, use `time_condition`. - /// - /// By default this option is not set and corresponds to - /// `CURLOPT_TIMEVALUE`. - pub fn time_value(&mut self, val: i64) -> Result<(), Error> { - self.setopt_long(curl_sys::CURLOPT_TIMEVALUE, val as c_long) - } - - // ========================================================================= - // Connection Options - - /// Set maximum time the request is allowed to take. 
- /// - /// Normally, name lookups can take a considerable time and limiting - /// operations to less than a few minutes risk aborting perfectly normal - /// operations. - /// - /// If libcurl is built to use the standard system name resolver, that - /// portion of the transfer will still use full-second resolution for - /// timeouts with a minimum timeout allowed of one second. - /// - /// In unix-like systems, this might cause signals to be used unless - /// `nosignal` is set. - /// - /// Since this puts a hard limit for how long time a request is allowed to - /// take, it has limited use in dynamic use cases with varying transfer - /// times. You are then advised to explore `low_speed_limit`, - /// `low_speed_time` or using `progress_function` to implement your own - /// timeout logic. - /// - /// By default this option is not set and corresponds to - /// `CURLOPT_TIMEOUT_MS`. - pub fn timeout(&mut self, timeout: Duration) -> Result<(), Error> { - // TODO: checked arithmetic and casts - // TODO: use CURLOPT_TIMEOUT if the timeout is too great - let ms = timeout.as_secs() * 1000 + - (timeout.subsec_nanos() / 1_000_000) as u64; - self.setopt_long(curl_sys::CURLOPT_TIMEOUT_MS, ms as c_long) - - } - - /// Set the low speed limit in bytes per second. - /// - /// This specifies the average transfer speed in bytes per second that the - /// transfer should be below during `low_speed_time` for libcurl to consider - /// it to be too slow and abort. - /// - /// By default this option is not set and corresponds to - /// `CURLOPT_LOW_SPEED_LIMIT`. - pub fn low_speed_limit(&mut self, limit: u32) -> Result<(), Error> { - self.setopt_long(curl_sys::CURLOPT_LOW_SPEED_LIMIT, limit as c_long) - } - - /// Set the low speed time period. - /// - /// Specifies the window of time for which if the transfer rate is below - /// `low_speed_limit` the request will be aborted. - /// - /// By default this option is not set and corresponds to - /// `CURLOPT_LOW_SPEED_TIME`. 
- pub fn low_speed_time(&mut self, dur: Duration) -> Result<(), Error> { - self.setopt_long(curl_sys::CURLOPT_LOW_SPEED_TIME, - dur.as_secs() as c_long) - } - - /// Rate limit data upload speed - /// - /// If an upload exceeds this speed (counted in bytes per second) on - /// cumulative average during the transfer, the transfer will pause to keep - /// the average rate less than or equal to the parameter value. - /// - /// By default this option is not set (unlimited speed) and corresponds to - /// `CURLOPT_MAX_SEND_SPEED_LARGE`. - pub fn max_send_speed(&mut self, speed: u64) -> Result<(), Error> { - self.setopt_off_t(curl_sys::CURLOPT_MAX_SEND_SPEED_LARGE, - speed as curl_sys::curl_off_t) - } - - /// Rate limit data download speed - /// - /// If a download exceeds this speed (counted in bytes per second) on - /// cumulative average during the transfer, the transfer will pause to keep - /// the average rate less than or equal to the parameter value. - /// - /// By default this option is not set (unlimited speed) and corresponds to - /// `CURLOPT_MAX_RECV_SPEED_LARGE`. - pub fn max_recv_speed(&mut self, speed: u64) -> Result<(), Error> { - self.setopt_off_t(curl_sys::CURLOPT_MAX_RECV_SPEED_LARGE, - speed as curl_sys::curl_off_t) - } - - /// Set the maximum connection cache size. - /// - /// The set amount will be the maximum number of simultaneously open - /// persistent connections that libcurl may cache in the pool associated - /// with this handle. The default is 5, and there isn't much point in - /// changing this value unless you are perfectly aware of how this works and - /// changes libcurl's behaviour. This concerns connections using any of the - /// protocols that support persistent connections. - /// - /// When reaching the maximum limit, curl closes the oldest one in the cache - /// to prevent increasing the number of open connections. 
- /// - /// By default this option is set to 5 and corresponds to - /// `CURLOPT_MAXCONNECTS` - pub fn max_connects(&mut self, max: u32) -> Result<(), Error> { - self.setopt_long(curl_sys::CURLOPT_MAXCONNECTS, max as c_long) - } - - /// Force a new connection to be used. - /// - /// Makes the next transfer use a new (fresh) connection by force instead of - /// trying to re-use an existing one. This option should be used with - /// caution and only if you understand what it does as it may seriously - /// impact performance. - /// - /// By default this option is `false` and corresponds to - /// `CURLOPT_FRESH_CONNECT`. - pub fn fresh_connect(&mut self, enable: bool) -> Result<(), Error> { - self.setopt_long(curl_sys::CURLOPT_FRESH_CONNECT, enable as c_long) - } - - /// Make connection get closed at once after use. - /// - /// Makes libcurl explicitly close the connection when done with the - /// transfer. Normally, libcurl keeps all connections alive when done with - /// one transfer in case a succeeding one follows that can re-use them. - /// This option should be used with caution and only if you understand what - /// it does as it can seriously impact performance. - /// - /// By default this option is `false` and corresponds to - /// `CURLOPT_FORBID_REUSE`. - pub fn forbid_reuse(&mut self, enable: bool) -> Result<(), Error> { - self.setopt_long(curl_sys::CURLOPT_FORBID_REUSE, enable as c_long) - } - - /// Timeout for the connect phase - /// - /// This is the maximum time that you allow the connection phase to the - /// server to take. This only limits the connection phase, it has no impact - /// once it has connected. - /// - /// By default this value is 300 seconds and corresponds to - /// `CURLOPT_CONNECTTIMEOUT_MS`. 
- pub fn connect_timeout(&mut self, timeout: Duration) -> Result<(), Error> { - let ms = timeout.as_secs() * 1000 + - (timeout.subsec_nanos() / 1_000_000) as u64; - self.setopt_long(curl_sys::CURLOPT_CONNECTTIMEOUT_MS, ms as c_long) - } - - /// Specify which IP protocol version to use - /// - /// Allows an application to select what kind of IP addresses to use when - /// resolving host names. This is only interesting when using host names - /// that resolve addresses using more than one version of IP. - /// - /// By default this value is "any" and corresponds to `CURLOPT_IPRESOLVE`. - pub fn ip_resolve(&mut self, resolve: IpResolve) -> Result<(), Error> { - self.setopt_long(curl_sys::CURLOPT_IPRESOLVE, resolve as c_long) - } - - /// Configure whether to stop when connected to target server - /// - /// When enabled it tells the library to perform all the required proxy - /// authentication and connection setup, but no data transfer, and then - /// return. - /// - /// The option can be used to simply test a connection to a server. - /// - /// By default this value is `false` and corresponds to - /// `CURLOPT_CONNECT_ONLY`. - pub fn connect_only(&mut self, enable: bool) -> Result<(), Error> { - self.setopt_long(curl_sys::CURLOPT_CONNECT_ONLY, enable as c_long) - } - - // /// Set interface to speak DNS over. - // /// - // /// Set the name of the network interface that the DNS resolver should bind - // /// to. This must be an interface name (not an address). - // /// - // /// By default this option is not set and corresponds to - // /// `CURLOPT_DNS_INTERFACE`. - // pub fn dns_interface(&mut self, interface: &str) -> Result<(), Error> { - // let interface = try!(CString::new(interface)); - // self.setopt_str(curl_sys::CURLOPT_DNS_INTERFACE, &interface) - // } - // - // /// IPv4 address to bind DNS resolves to - // /// - // /// Set the local IPv4 address that the resolver should bind to. 
The - // /// argument should be of type char * and contain a single numerical IPv4 - // /// address as a string. - // /// - // /// By default this option is not set and corresponds to - // /// `CURLOPT_DNS_LOCAL_IP4`. - // pub fn dns_local_ip4(&mut self, ip: &str) -> Result<(), Error> { - // let ip = try!(CString::new(ip)); - // self.setopt_str(curl_sys::CURLOPT_DNS_LOCAL_IP4, &ip) - // } - // - // /// IPv6 address to bind DNS resolves to - // /// - // /// Set the local IPv6 address that the resolver should bind to. The - // /// argument should be of type char * and contain a single numerical IPv6 - // /// address as a string. - // /// - // /// By default this option is not set and corresponds to - // /// `CURLOPT_DNS_LOCAL_IP6`. - // pub fn dns_local_ip6(&mut self, ip: &str) -> Result<(), Error> { - // let ip = try!(CString::new(ip)); - // self.setopt_str(curl_sys::CURLOPT_DNS_LOCAL_IP6, &ip) - // } - // - // /// Set preferred DNS servers. - // /// - // /// Provides a list of DNS servers to be used instead of the system default. - // /// The format of the dns servers option is: - // /// - // /// ```text - // /// host[:port],[host[:port]]... - // /// ``` - // /// - // /// By default this option is not set and corresponds to - // /// `CURLOPT_DNS_SERVERS`. - // pub fn dns_servers(&mut self, servers: &str) -> Result<(), Error> { - // let servers = try!(CString::new(servers)); - // self.setopt_str(curl_sys::CURLOPT_DNS_SERVERS, &servers) - // } - - // ========================================================================= - // SSL/Security Options - - /// Sets the SSL client certificate. - /// - /// The string should be the file name of your client certificate. The - /// default format is "P12" on Secure Transport and "PEM" on other engines, - /// and can be changed with `ssl_cert_type`. - /// - /// With NSS or Secure Transport, this can also be the nickname of the - /// certificate you wish to authenticate with as it is named in the security - /// database. 
If you want to use a file from the current directory, please - /// precede it with "./" prefix, in order to avoid confusion with a - /// nickname. - /// - /// When using a client certificate, you most likely also need to provide a - /// private key with `ssl_key`. - /// - /// By default this option is not set and corresponds to `CURLOPT_SSLCERT`. - pub fn ssl_cert>(&mut self, cert: P) -> Result<(), Error> { - self.setopt_path(curl_sys::CURLOPT_SSLCERT, cert.as_ref()) - } - - /// Specify type of the client SSL certificate. - /// - /// The string should be the format of your certificate. Supported formats - /// are "PEM" and "DER", except with Secure Transport. OpenSSL (versions - /// 0.9.3 and later) and Secure Transport (on iOS 5 or later, or OS X 10.7 - /// or later) also support "P12" for PKCS#12-encoded files. - /// - /// By default this option is "PEM" and corresponds to - /// `CURLOPT_SSLCERTTYPE`. - pub fn ssl_cert_type(&mut self, kind: &str) -> Result<(), Error> { - let kind = try!(CString::new(kind)); - self.setopt_str(curl_sys::CURLOPT_SSLCERTTYPE, &kind) - } - - /// Specify private keyfile for TLS and SSL client cert. - /// - /// The string should be the file name of your private key. The default - /// format is "PEM" and can be changed with `ssl_key_type`. - /// - /// (iOS and Mac OS X only) This option is ignored if curl was built against - /// Secure Transport. Secure Transport expects the private key to be already - /// present in the keychain or PKCS#12 file containing the certificate. - /// - /// By default this option is not set and corresponds to `CURLOPT_SSLKEY`. - pub fn ssl_key>(&mut self, key: P) -> Result<(), Error> { - self.setopt_path(curl_sys::CURLOPT_SSLKEY, key.as_ref()) - } - - /// Set type of the private key file. - /// - /// The string should be the format of your private key. Supported formats - /// are "PEM", "DER" and "ENG". - /// - /// The format "ENG" enables you to load the private key from a crypto - /// engine. 
In this case `ssl_key` is used as an identifier passed to - /// the engine. You have to set the crypto engine with `ssl_engine`. - /// "DER" format key file currently does not work because of a bug in - /// OpenSSL. - /// - /// By default this option is "PEM" and corresponds to - /// `CURLOPT_SSLKEYTYPE`. - pub fn ssl_key_type(&mut self, kind: &str) -> Result<(), Error> { - let kind = try!(CString::new(kind)); - self.setopt_str(curl_sys::CURLOPT_SSLKEYTYPE, &kind) - } - - /// Set passphrase to private key. - /// - /// This will be used as the password required to use the `ssl_key`. - /// You never needed a pass phrase to load a certificate but you need one to - /// load your private key. - /// - /// By default this option is not set and corresponds to - /// `CURLOPT_KEYPASSWD`. - pub fn key_password(&mut self, password: &str) -> Result<(), Error> { - let password = try!(CString::new(password)); - self.setopt_str(curl_sys::CURLOPT_KEYPASSWD, &password) - } - - /// Set the SSL engine identifier. - /// - /// This will be used as the identifier for the crypto engine you want to - /// use for your private key. - /// - /// By default this option is not set and corresponds to - /// `CURLOPT_SSLENGINE`. - pub fn ssl_engine(&mut self, engine: &str) -> Result<(), Error> { - let engine = try!(CString::new(engine)); - self.setopt_str(curl_sys::CURLOPT_SSLENGINE, &engine) - } - - /// Make this handle's SSL engine the default. - /// - /// By default this option is not set and corresponds to - /// `CURLOPT_SSLENGINE_DEFAULT`. - pub fn ssl_engine_default(&mut self, enable: bool) -> Result<(), Error> { - self.setopt_long(curl_sys::CURLOPT_SSLENGINE_DEFAULT, enable as c_long) - } - - // /// Enable TLS false start. - // /// - // /// This option determines whether libcurl should use false start during the - // /// TLS handshake. 
False start is a mode where a TLS client will start - // /// sending application data before verifying the server's Finished message, - // /// thus saving a round trip when performing a full handshake. - // /// - // /// By default this option is not set and corresponds to - // /// `CURLOPT_SSL_FALSESTARTE`. - // pub fn ssl_false_start(&mut self, enable: bool) -> Result<(), Error> { - // self.setopt_long(curl_sys::CURLOPT_SSLENGINE_DEFAULT, enable as c_long) - // } - - /// Set preferred TLS/SSL version. - /// - /// By default this option is not set and corresponds to - /// `CURLOPT_SSLVERSION`. - pub fn ssl_version(&mut self, version: SslVersion) -> Result<(), Error> { - self.setopt_long(curl_sys::CURLOPT_SSLVERSION, version as c_long) - } - - /// Verify the certificate's name against host. - /// - /// This should be disabled with great caution! It basically disables the - /// security features of SSL if it is disabled. - /// - /// By default this option is set to `true` and corresponds to - /// `CURLOPT_SSL_VERIFYHOST`. - pub fn ssl_verify_host(&mut self, verify: bool) -> Result<(), Error> { - let val = if verify {2} else {0}; - self.setopt_long(curl_sys::CURLOPT_SSL_VERIFYHOST, val) - } - - /// Verify the peer's SSL certificate. - /// - /// This should be disabled with great caution! It basically disables the - /// security features of SSL if it is disabled. - /// - /// By default this option is set to `true` and corresponds to - /// `CURLOPT_SSL_VERIFYPEER`. - pub fn ssl_verify_peer(&mut self, verify: bool) -> Result<(), Error> { - self.setopt_long(curl_sys::CURLOPT_SSL_VERIFYPEER, verify as c_long) - } - - // /// Verify the certificate's status. - // /// - // /// This option determines whether libcurl verifies the status of the server - // /// cert using the "Certificate Status Request" TLS extension (aka. OCSP - // /// stapling). - // /// - // /// By default this option is set to `false` and corresponds to - // /// `CURLOPT_SSL_VERIFYSTATUS`. 
- // pub fn ssl_verify_status(&mut self, verify: bool) -> Result<(), Error> { - // self.setopt_long(curl_sys::CURLOPT_SSL_VERIFYSTATUS, verify as c_long) - // } - - /// Specify the path to Certificate Authority (CA) bundle - /// - /// The file referenced should hold one or more certificates to verify the - /// peer with. - /// - /// This option is by default set to the system path where libcurl's cacert - /// bundle is assumed to be stored, as established at build time. - /// - /// If curl is built against the NSS SSL library, the NSS PEM PKCS#11 module - /// (libnsspem.so) needs to be available for this option to work properly. - /// - /// By default this option is the system defaults, and corresponds to - /// `CURLOPT_CAINFO`. - pub fn cainfo>(&mut self, path: P) -> Result<(), Error> { - self.setopt_path(curl_sys::CURLOPT_CAINFO, path.as_ref()) - } - - /// Set the issuer SSL certificate filename - /// - /// Specifies a file holding a CA certificate in PEM format. If the option - /// is set, an additional check against the peer certificate is performed to - /// verify the issuer is indeed the one associated with the certificate - /// provided by the option. This additional check is useful in multi-level - /// PKI where one needs to enforce that the peer certificate is from a - /// specific branch of the tree. - /// - /// This option makes sense only when used in combination with the - /// `ssl_verify_peer` option. Otherwise, the result of the check is not - /// considered as failure. - /// - /// By default this option is not set and corresponds to - /// `CURLOPT_ISSUERCERT`. - pub fn issuer_cert>(&mut self, path: P) -> Result<(), Error> { - self.setopt_path(curl_sys::CURLOPT_ISSUERCERT, path.as_ref()) - } - - /// Specify directory holding CA certificates - /// - /// Names a directory holding multiple CA certificates to verify the peer - /// with. 
If libcurl is built against OpenSSL, the certificate directory - /// must be prepared using the openssl c_rehash utility. This makes sense - /// only when used in combination with the `ssl_verify_peer` option. - /// - /// By default this option is not set and corresponds to `CURLOPT_CAPATH`. - pub fn capath>(&mut self, path: P) -> Result<(), Error> { - self.setopt_path(curl_sys::CURLOPT_CAPATH, path.as_ref()) - } - - /// Specify a Certificate Revocation List file - /// - /// Names a file with the concatenation of CRL (in PEM format) to use in the - /// certificate validation that occurs during the SSL exchange. - /// - /// When curl is built to use NSS or GnuTLS, there is no way to influence - /// the use of CRL passed to help in the verification process. When libcurl - /// is built with OpenSSL support, X509_V_FLAG_CRL_CHECK and - /// X509_V_FLAG_CRL_CHECK_ALL are both set, requiring CRL check against all - /// the elements of the certificate chain if a CRL file is passed. - /// - /// This option makes sense only when used in combination with the - /// `ssl_verify_peer` option. - /// - /// A specific error code (`is_ssl_crl_badfile`) is defined with the - /// option. It is returned when the SSL exchange fails because the CRL file - /// cannot be loaded. A failure in certificate verification due to a - /// revocation information found in the CRL does not trigger this specific - /// error. - /// - /// By default this option is not set and corresponds to `CURLOPT_CRLFILE`. - pub fn crlfile>(&mut self, path: P) -> Result<(), Error> { - self.setopt_path(curl_sys::CURLOPT_CRLFILE, path.as_ref()) - } - - /// Request SSL certificate information - /// - /// Enable libcurl's certificate chain info gatherer. With this enabled, - /// libcurl will extract lots of information and data about the certificates - /// in the certificate chain used in the SSL connection. - /// - /// By default this option is `false` and corresponds to - /// `CURLOPT_CERTINFO`. 
- pub fn certinfo(&mut self, enable: bool) -> Result<(), Error> { - self.setopt_long(curl_sys::CURLOPT_CERTINFO, enable as c_long) - } - - // /// Set pinned public key. - // /// - // /// Pass a pointer to a zero terminated string as parameter. The string can - // /// be the file name of your pinned public key. The file format expected is - // /// "PEM" or "DER". The string can also be any number of base64 encoded - // /// sha256 hashes preceded by "sha256//" and separated by ";" - // /// - // /// When negotiating a TLS or SSL connection, the server sends a certificate - // /// indicating its identity. A public key is extracted from this certificate - // /// and if it does not exactly match the public key provided to this option, - // /// curl will abort the connection before sending or receiving any data. - // /// - // /// By default this option is not set and corresponds to - // /// `CURLOPT_PINNEDPUBLICKEY`. - // pub fn pinned_public_key(&mut self, enable: bool) -> Result<(), Error> { - // self.setopt_long(curl_sys::CURLOPT_CERTINFO, enable as c_long) - // } - - /// Specify a source for random data - /// - /// The file will be used to read from to seed the random engine for SSL and - /// more. - /// - /// By default this option is not set and corresponds to - /// `CURLOPT_RANDOM_FILE`. - pub fn random_file>(&mut self, p: P) -> Result<(), Error> { - self.setopt_path(curl_sys::CURLOPT_RANDOM_FILE, p.as_ref()) - } - - /// Specify EGD socket path. - /// - /// Indicates the path name to the Entropy Gathering Daemon socket. It will - /// be used to seed the random engine for SSL. - /// - /// By default this option is not set and corresponds to - /// `CURLOPT_EGDSOCKET`. - pub fn egd_socket>(&mut self, p: P) -> Result<(), Error> { - self.setopt_path(curl_sys::CURLOPT_EGDSOCKET, p.as_ref()) - } - - /// Specify ciphers to use for TLS. - /// - /// Holds the list of ciphers to use for the SSL connection. 
The list must - /// be syntactically correct, it consists of one or more cipher strings - /// separated by colons. Commas or spaces are also acceptable separators - /// but colons are normally used, !, - and + can be used as operators. - /// - /// For OpenSSL and GnuTLS valid examples of cipher lists include 'RC4-SHA', - /// ´SHA1+DES´, 'TLSv1' and 'DEFAULT'. The default list is normally set when - /// you compile OpenSSL. - /// - /// You'll find more details about cipher lists on this URL: - /// - /// https://www.openssl.org/docs/apps/ciphers.html - /// - /// For NSS, valid examples of cipher lists include 'rsa_rc4_128_md5', - /// ´rsa_aes_128_sha´, etc. With NSS you don't add/remove ciphers. If one - /// uses this option then all known ciphers are disabled and only those - /// passed in are enabled. - /// - /// You'll find more details about the NSS cipher lists on this URL: - /// - /// http://git.fedorahosted.org/cgit/mod_nss.git/plain/docs/mod_nss.html#Directives - /// - /// By default this option is not set and corresponds to - /// `CURLOPT_SSL_CIPHER_LIST`. - pub fn ssl_cipher_list(&mut self, ciphers: &str) -> Result<(), Error> { - let ciphers = try!(CString::new(ciphers)); - self.setopt_str(curl_sys::CURLOPT_SSL_CIPHER_LIST, &ciphers) - } - - /// Enable or disable use of the SSL session-ID cache - /// - /// By default all transfers are done using the cache enabled. While nothing - /// ever should get hurt by attempting to reuse SSL session-IDs, there seem - /// to be or have been broken SSL implementations in the wild that may - /// require you to disable this in order for you to succeed. - /// - /// This corresponds to the `CURLOPT_SSL_SESSIONID_CACHE` option. - pub fn ssl_sessionid_cache(&mut self, enable: bool) -> Result<(), Error> { - self.setopt_long(curl_sys::CURLOPT_SSL_SESSIONID_CACHE, - enable as c_long) - } - - // /// Stores a private pointer-sized piece of data. 
- // /// - // /// This can be retrieved through the `private` function and otherwise - // /// libcurl does not tamper with this value. This corresponds to - // /// `CURLOPT_PRIVATE` and defaults to 0. - // pub fn set_private(&mut self, private: usize) -> Result<(), Error> { - // self.setopt_ptr(curl_sys::CURLOPT_PRIVATE, private as *const _) - // } - // - // /// Fetches this handle's private pointer-sized piece of data. - // /// - // /// This corresponds to `CURLINFO_PRIVATE` and defaults to 0. - // pub fn private(&mut self) -> Result { - // self.getopt_ptr(curl_sys::CURLINFO_PRIVATE).map(|p| p as usize) - // } - - // ========================================================================= - // getters - - /// Get the last used URL - /// - /// In cases when you've asked libcurl to follow redirects, it may - /// not be the same value you set with `url`. - /// - /// This methods corresponds to the `CURLINFO_EFFECTIVE_URL` option. - /// - /// Returns `Ok(None)` if no effective url is listed or `Err` if an error - /// happens or the underlying bytes aren't valid utf-8. - pub fn effective_url(&mut self) -> Result, Error> { - self.getopt_str(curl_sys::CURLINFO_EFFECTIVE_URL) - } - - /// Get the last used URL, in bytes - /// - /// In cases when you've asked libcurl to follow redirects, it may - /// not be the same value you set with `url`. - /// - /// This methods corresponds to the `CURLINFO_EFFECTIVE_URL` option. - /// - /// Returns `Ok(None)` if no effective url is listed or `Err` if an error - /// happens or the underlying bytes aren't valid utf-8. - pub fn effective_url_bytes(&mut self) -> Result, Error> { - self.getopt_bytes(curl_sys::CURLINFO_EFFECTIVE_URL) - } - - /// Get the last response code - /// - /// The stored value will be zero if no server response code has been - /// received. Note that a proxy's CONNECT response should be read with - /// `http_connectcode` and not this. 
- /// - /// Corresponds to `CURLINFO_RESPONSE_CODE` and returns an error if this - /// option is not supported. - pub fn response_code(&mut self) -> Result { - self.getopt_long(curl_sys::CURLINFO_RESPONSE_CODE).map(|c| c as u32) - } - - /// Get the CONNECT response code - /// - /// Returns the last received HTTP proxy response code to a CONNECT request. - /// The returned value will be zero if no such response code was available. - /// - /// Corresponds to `CURLINFO_HTTP_CONNECTCODE` and returns an error if this - /// option is not supported. - pub fn http_connectcode(&mut self) -> Result { - self.getopt_long(curl_sys::CURLINFO_HTTP_CONNECTCODE).map(|c| c as u32) - } - - /// Get the remote time of the retrieved document - /// - /// Returns the remote time of the retrieved document (in number of seconds - /// since 1 Jan 1970 in the GMT/UTC time zone). If you get `None`, it can be - /// because of many reasons (it might be unknown, the server might hide it - /// or the server doesn't support the command that tells document time etc) - /// and the time of the document is unknown. - /// - /// Note that you must tell the server to collect this information before - /// the transfer is made, by using the `filetime` method to - /// or you will unconditionally get a `None` back. - /// - /// This corresponds to `CURLINFO_FILETIME` and may return an error if the - /// option is not supported - pub fn filetime(&mut self) -> Result, Error> { - self.getopt_long(curl_sys::CURLINFO_FILETIME).map(|r| { - if r == -1 { - None - } else { - Some(r as i64) - } - }) - } - - /// Get the number of redirects - /// - /// Corresponds to `CURLINFO_REDIRECT_COUNT` and may return an error if the - /// option isn't supported. - pub fn redirect_count(&mut self) -> Result { - self.getopt_long(curl_sys::CURLINFO_REDIRECT_COUNT).map(|c| c as u32) - } - - /// Get the URL a redirect would go to - /// - /// Returns the URL a redirect would take you to if you would enable - /// `follow_location`. 
This can come very handy if you think using the - /// built-in libcurl redirect logic isn't good enough for you but you would - /// still prefer to avoid implementing all the magic of figuring out the new - /// URL. - /// - /// Corresponds to `CURLINFO_REDIRECT_URL` and may return an error if the - /// url isn't valid utf-8 or an error happens. - pub fn redirect_url(&mut self) -> Result, Error> { - self.getopt_str(curl_sys::CURLINFO_REDIRECT_URL) - } - - /// Get the URL a redirect would go to, in bytes - /// - /// Returns the URL a redirect would take you to if you would enable - /// `follow_location`. This can come very handy if you think using the - /// built-in libcurl redirect logic isn't good enough for you but you would - /// still prefer to avoid implementing all the magic of figuring out the new - /// URL. - /// - /// Corresponds to `CURLINFO_REDIRECT_URL` and may return an error. - pub fn redirect_url_bytes(&mut self) -> Result, Error> { - self.getopt_bytes(curl_sys::CURLINFO_REDIRECT_URL) - } - - /// Get size of retrieved headers - /// - /// Corresponds to `CURLINFO_HEADER_SIZE` and may return an error if the - /// option isn't supported. - pub fn header_size(&mut self) -> Result { - self.getopt_long(curl_sys::CURLINFO_HEADER_SIZE).map(|c| c as u64) - } - - /// Get size of sent request. - /// - /// Corresponds to `CURLINFO_REQUEST_SIZE` and may return an error if the - /// option isn't supported. - pub fn request_size(&mut self) -> Result { - self.getopt_long(curl_sys::CURLINFO_REQUEST_SIZE).map(|c| c as u64) - } - - /// Get Content-Type - /// - /// Returns the content-type of the downloaded object. This is the value - /// read from the Content-Type: field. If you get `None`, it means that the - /// server didn't send a valid Content-Type header or that the protocol - /// used doesn't support this. - /// - /// Corresponds to `CURLINFO_CONTENT_TYPE` and may return an error if the - /// option isn't supported. 
- pub fn content_type(&mut self) -> Result, Error> { - self.getopt_str(curl_sys::CURLINFO_CONTENT_TYPE) - } - - /// Get Content-Type, in bytes - /// - /// Returns the content-type of the downloaded object. This is the value - /// read from the Content-Type: field. If you get `None`, it means that the - /// server didn't send a valid Content-Type header or that the protocol - /// used doesn't support this. - /// - /// Corresponds to `CURLINFO_CONTENT_TYPE` and may return an error if the - /// option isn't supported. - pub fn content_type_bytes(&mut self) -> Result, Error> { - self.getopt_bytes(curl_sys::CURLINFO_CONTENT_TYPE) - } - - /// Get errno number from last connect failure. - /// - /// Note that the value is only set on failure, it is not reset upon a - /// successful operation. The number is OS and system specific. - /// - /// Corresponds to `CURLINFO_OS_ERRNO` and may return an error if the - /// option isn't supported. - pub fn os_errno(&mut self) -> Result { - self.getopt_long(curl_sys::CURLINFO_OS_ERRNO).map(|c| c as i32) - } - - /// Get IP address of last connection. - /// - /// Returns a string holding the IP address of the most recent connection - /// done with this curl handle. This string may be IPv6 when that is - /// enabled. - /// - /// Corresponds to `CURLINFO_PRIMARY_IP` and may return an error if the - /// option isn't supported. - pub fn primary_ip(&mut self) -> Result, Error> { - self.getopt_str(curl_sys::CURLINFO_PRIMARY_IP) - } - - /// Get the latest destination port number - /// - /// Corresponds to `CURLINFO_PRIMARY_PORT` and may return an error if the - /// option isn't supported. - pub fn primary_port(&mut self) -> Result { - self.getopt_long(curl_sys::CURLINFO_PRIMARY_PORT).map(|c| c as u16) - } - - /// Get local IP address of last connection - /// - /// Returns a string holding the IP address of the local end of most recent - /// connection done with this curl handle. This string may be IPv6 when that - /// is enabled. 
- /// - /// Corresponds to `CURLINFO_LOCAL_IP` and may return an error if the - /// option isn't supported. - pub fn local_ip(&mut self) -> Result, Error> { - self.getopt_str(curl_sys::CURLINFO_LOCAL_IP) - } - - /// Get the latest local port number - /// - /// Corresponds to `CURLINFO_LOCAL_PORT` and may return an error if the - /// option isn't supported. - pub fn local_port(&mut self) -> Result { - self.getopt_long(curl_sys::CURLINFO_LOCAL_PORT).map(|c| c as u16) - } - - /// Get all known cookies - /// - /// Returns a linked-list of all cookies cURL knows (expired ones, too). - /// - /// Corresponds to the `CURLINFO_COOKIELIST` option and may return an error - /// if the option isn't supported. - pub fn cookies(&mut self) -> Result { - unsafe { - let mut list = 0 as *mut _; - let rc = curl_sys::curl_easy_getinfo(self.handle, - curl_sys::CURLINFO_COOKIELIST, - &mut list); - try!(self.cvt(rc)); - Ok(List { raw: list }) - } - } - - // ========================================================================= - // Other methods - - /// After options have been set, this will perform the transfer described by - /// the options. - /// - /// This performs the request in a synchronous fashion. This can be used - /// multiple times for one easy handle and libcurl will attempt to re-use - /// the same connection for all transfers. - /// - /// This method will preserve all options configured in this handle for the - /// next request, and if that is not desired then the options can be - /// manually reset or the `reset` method can be called. - /// - /// Note that this method takes `&self`, which is quite important! This - /// allows applications to close over the handle in various callbacks to - /// call methods like `unpause_write` and `unpause_read` while a transfer is - /// in progress. 
- pub fn perform(&self) -> Result<(), Error> { - unsafe { - self.reset_scoped_configuration(); - } - self.do_perform() - } - - fn do_perform(&self) -> Result<(), Error> { - if self.data.running.get() { - return Err(Error::new(curl_sys::CURLE_FAILED_INIT)) - } - - self.data.running.set(true); - let ret = unsafe { - self.cvt(curl_sys::curl_easy_perform(self.handle)) - }; - self.data.running.set(false); - panic::propagate(); - return ret - } - - /// Creates a new scoped transfer which can be used to set callbacks and - /// data which only live for the scope of the returned object. - /// - /// An `Easy` handle is often reused between different requests to cache - /// connections to servers, but often the lifetime of the data as part of - /// each transfer is unique. This function serves as an ability to share an - /// `Easy` across many transfers while ergonomically using possibly - /// stack-local data as part of each transfer. - /// - /// Configuration can be set on the `Easy` and then a `Transfer` can be - /// created to set scoped configuration (like callbacks). Finally, the - /// `perform` method on the `Transfer` function can be used. - /// - /// When the `Transfer` option is dropped then all configuration set on the - /// transfer itself will be reset. - pub fn transfer<'data, 'easy>(&'easy mut self) -> Transfer<'easy, 'data> { - // NB: We need to be *very* careful here about how we treat the - // callbacks set on a `Transfer`! It may be possible for that type - // to leak, and if we were to continue using the callbacks there - // there could possibly be use-after-free as they reference - // stack-local data. As a result, we attempt to be robust in the - // face of leaking a `Transfer` (one that didn't drop). - // - // What this basically amounts to is that whenever we poke libcurl that - // *might* call one of those callbacks or use some of that data we clear - // out everything that would have been set on a `Transfer` and instead - // start fresh. 
This call to `reset_scoped_configuration` will reset all - // callbacks based on the state in *this* handle which we know is still - // alive, so it's safe to configure. - // - // Also note that because we have to be resilient in the face of - // `Transfer` leaks anyway we just don't bother with a `Drop` impl and - // instead rely on this always running to reset any configuration. - assert!(!self.data.running.get()); - unsafe { - self.reset_scoped_configuration(); - } - Transfer { - data: Box::new(TransferData::default()), - easy: self, - } - } - - // See note above in `transfer` for what this is doing. - unsafe fn reset_scoped_configuration(&self) { - let EasyData { - ref write, - ref read, - ref seek, - ref debug, - ref header, - ref progress, - ref ssl_ctx, - ref running, - header_list: _, - form: _, - error_buf: _, - } = *self.data; - - // Can't reset while running, we'll detect this elsewhere - if running.get() { - return - } - - let ptr = |set| { - if set { - &*self.data as *const _ as *mut c_void - } else { - 0 as *mut _ - } - }; - - let write = ptr(write.is_some()); - let read = ptr(read.is_some()); - let seek = ptr(seek.is_some()); - let debug = ptr(debug.is_some()); - let header = ptr(header.is_some()); - let progress = ptr(progress.is_some()); - let ssl_ctx = ptr(ssl_ctx.is_some()); - - let _ = self.set_write_function(easy_write_cb, write); - let _ = self.set_read_function(easy_read_cb, read); - let _ = self.set_seek_function(easy_seek_cb, seek); - let _ = self.set_debug_function(easy_debug_cb, debug); - let _ = self.set_header_function(easy_header_cb, header); - let _ = self.set_progress_function(easy_progress_cb, progress); - let _ = self.set_ssl_ctx_function(easy_ssl_ctx_cb, ssl_ctx); - - // Clear out the post fields which may be referencing stale data. - // curl_sys::curl_easy_setopt(easy, - // curl_sys::CURLOPT_POSTFIELDS, - // 0 as *const i32); - } - - /// Unpause reading on a connection. 
- /// - /// Using this function, you can explicitly unpause a connection that was - /// previously paused. - /// - /// A connection can be paused by letting the read or the write callbacks - /// return `ReadError::Pause` or `WriteError::Pause`. - /// - /// To unpause, you may for example call this from the progress callback - /// which gets called at least once per second, even if the connection is - /// paused. - /// - /// The chance is high that you will get your write callback called before - /// this function returns. - pub fn unpause_read(&self) -> Result<(), Error> { - unsafe { - let rc = curl_sys::curl_easy_pause(self.handle, - curl_sys::CURLPAUSE_RECV_CONT); - self.cvt(rc) - } - } - - /// Unpause writing on a connection. - /// - /// Using this function, you can explicitly unpause a connection that was - /// previously paused. - /// - /// A connection can be paused by letting the read or the write callbacks - /// return `ReadError::Pause` or `WriteError::Pause`. A write callback that - /// returns pause signals to the library that it couldn't take care of any - /// data at all, and that data will then be delivered again to the callback - /// when the writing is later unpaused. - /// - /// To unpause, you may for example call this from the progress callback - /// which gets called at least once per second, even if the connection is - /// paused. 
- pub fn unpause_write(&self) -> Result<(), Error> { - unsafe { - let rc = curl_sys::curl_easy_pause(self.handle, - curl_sys::CURLPAUSE_SEND_CONT); - self.cvt(rc) - } - } - - /// URL encodes a string `s` - pub fn url_encode(&mut self, s: &[u8]) -> String { - if s.len() == 0 { - return String::new() - } - unsafe { - let p = curl_sys::curl_easy_escape(self.handle, - s.as_ptr() as *const _, - s.len() as c_int); - assert!(!p.is_null()); - let ret = str::from_utf8(CStr::from_ptr(p).to_bytes()).unwrap(); - let ret = String::from(ret); - curl_sys::curl_free(p as *mut _); - return ret - } - } - - /// URL decodes a string `s`, returning `None` if it fails - pub fn url_decode(&mut self, s: &str) -> Vec { - if s.len() == 0 { - return Vec::new(); - } - - // Work around https://curl.haxx.se/docs/adv_20130622.html, a bug where - // if the last few characters are a bad escape then curl will have a - // buffer overrun. - let mut iter = s.chars().rev(); - let orig_len = s.len(); - let mut data; - let mut s = s; - if iter.next() == Some('%') || - iter.next() == Some('%') || - iter.next() == Some('%') { - data = s.to_string(); - data.push(0u8 as char); - s = &data[..]; - } - unsafe { - let mut len = 0; - let p = curl_sys::curl_easy_unescape(self.handle, - s.as_ptr() as *const _, - orig_len as c_int, - &mut len); - assert!(!p.is_null()); - let slice = slice::from_raw_parts(p as *const u8, len as usize); - let ret = slice.to_vec(); - curl_sys::curl_free(p as *mut _); - return ret - } - } - - // TODO: I don't think this is safe, you can drop this which has all the - // callback data and then the next is use-after-free - // - // /// Attempts to clone this handle, returning a new session handle with the - // /// same options set for this handle. - // /// - // /// Internal state info and things like persistent connections ccannot be - // /// transferred. 
- // /// - // /// # Errors - // /// - // /// If a new handle could not be allocated or another error happens, `None` - // /// is returned. - // pub fn try_clone<'b>(&mut self) -> Option> { - // unsafe { - // let handle = curl_sys::curl_easy_duphandle(self.handle); - // if handle.is_null() { - // None - // } else { - // Some(Easy { - // handle: handle, - // data: blank_data(), - // _marker: marker::PhantomData, - // }) - // } - // } - // } - - /// Re-initializes this handle to the default values. - /// - /// This puts the handle to the same state as it was in when it was just - /// created. This does, however, keep live connections, the session id - /// cache, the dns cache, and cookies. - pub fn reset(&mut self) { - unsafe { - curl_sys::curl_easy_reset(self.handle); - } - default_configure(self); - } - - /// Receives data from a connected socket. - /// - /// Only useful after a successful `perform` with the `connect_only` option - /// set as well. - pub fn recv(&mut self, data: &mut [u8]) -> Result { - unsafe { - let mut n = 0; - let r = curl_sys::curl_easy_recv(self.handle, - data.as_mut_ptr() as *mut _, - data.len(), - &mut n); - if r == curl_sys::CURLE_OK { - Ok(n) - } else { - Err(Error::new(r)) - } - } - } - - /// Sends data over the connected socket. - /// - /// Only useful after a successful `perform` with the `connect_only` option - /// set as well. - pub fn send(&mut self, data: &[u8]) -> Result { - unsafe { - let mut n = 0; - let rc = curl_sys::curl_easy_send(self.handle, - data.as_ptr() as *const _, - data.len(), - &mut n); - try!(self.cvt(rc)); - Ok(n) - } - } - - /// Get a pointer to the raw underlying CURL handle. 
- pub fn raw(&self) -> *mut curl_sys::CURL { - self.handle - } - - #[cfg(unix)] - fn setopt_path(&mut self, - opt: curl_sys::CURLoption, - val: &Path) -> Result<(), Error> { - use std::os::unix::prelude::*; - let s = try!(CString::new(val.as_os_str().as_bytes())); - self.setopt_str(opt, &s) - } - - #[cfg(windows)] - fn setopt_path(&mut self, - opt: curl_sys::CURLoption, - val: &Path) -> Result<(), Error> { - match val.to_str() { - Some(s) => self.setopt_str(opt, &try!(CString::new(s))), - None => Err(Error::new(curl_sys::CURLE_CONV_FAILED)), - } - } - - fn setopt_long(&mut self, - opt: curl_sys::CURLoption, - val: c_long) -> Result<(), Error> { - unsafe { - self.cvt(curl_sys::curl_easy_setopt(self.handle, opt, val)) - } - } - - fn setopt_str(&mut self, - opt: curl_sys::CURLoption, - val: &CStr) -> Result<(), Error> { - self.setopt_ptr(opt, val.as_ptr()) - } - - fn setopt_ptr(&self, - opt: curl_sys::CURLoption, - val: *const c_char) -> Result<(), Error> { - unsafe { - self.cvt(curl_sys::curl_easy_setopt(self.handle, opt, val)) - } - } - - fn setopt_off_t(&mut self, - opt: curl_sys::CURLoption, - val: curl_sys::curl_off_t) -> Result<(), Error> { - unsafe { - let rc = curl_sys::curl_easy_setopt(self.handle, opt, val); - self.cvt(rc) - } - } - - fn getopt_bytes(&mut self, opt: curl_sys::CURLINFO) - -> Result, Error> { - unsafe { - let p = try!(self.getopt_ptr(opt)); - if p.is_null() { - Ok(None) - } else { - Ok(Some(CStr::from_ptr(p).to_bytes())) - } - } - } - - fn getopt_ptr(&mut self, opt: curl_sys::CURLINFO) - -> Result<*const c_char, Error> { - unsafe { - let mut p = 0 as *const c_char; - let rc = curl_sys::curl_easy_getinfo(self.handle, opt, &mut p); - try!(self.cvt(rc)); - Ok(p) - } - } - - fn getopt_str(&mut self, opt: curl_sys::CURLINFO) - -> Result, Error> { - match self.getopt_bytes(opt) { - Ok(None) => Ok(None), - Err(e) => Err(e), - Ok(Some(bytes)) => { - match str::from_utf8(bytes) { - Ok(s) => Ok(Some(s)), - Err(_) => 
Err(Error::new(curl_sys::CURLE_CONV_FAILED)), - } - } - } - } - - fn getopt_long(&mut self, opt: curl_sys::CURLINFO) -> Result { - unsafe { - let mut p = 0; - let rc = curl_sys::curl_easy_getinfo(self.handle, opt, &mut p); - try!(self.cvt(rc)); - Ok(p) - } - } - - fn cvt(&self, rc: curl_sys::CURLcode) -> Result<(), Error> { - if rc == curl_sys::CURLE_OK { - return Ok(()) - } - let mut buf = self.data.error_buf.borrow_mut(); - if buf[0] == 0 { - return Err(Error::new(rc)) - } - let pos = buf.iter().position(|i| *i == 0).unwrap_or(buf.len()); - let msg = String::from_utf8_lossy(&buf[..pos]).into_owned(); - buf[0] = 0; - Err(::error::error_with_extra(rc, msg.into_boxed_str())) - } -} - -extern fn easy_write_cb(ptr: *mut c_char, - size: size_t, - nmemb: size_t, - data: *mut c_void) -> size_t { - write_cb(ptr, size, nmemb, data, |buf| unsafe { - (*(data as *mut EasyData)).write.as_mut().map(|f| f(buf)) - }) -} - -extern fn transfer_write_cb(ptr: *mut c_char, - size: size_t, - nmemb: size_t, - data: *mut c_void) -> size_t { - write_cb(ptr, size, nmemb, data, |buf| unsafe { - (*(data as *mut TransferData)).write.as_mut().map(|f| f(buf)) - }) -} - -fn write_cb(ptr: *mut c_char, - size: size_t, - nmemb: size_t, - data: *mut c_void, - f: F) - -> size_t - where F: FnOnce(&[u8]) -> Option> -{ - if data.is_null() { - return size * nmemb - } - panic::catch(|| unsafe { - let input = slice::from_raw_parts(ptr as *const u8, - size * nmemb); - match f(input) { - Some(Ok(s)) => s, - Some(Err(WriteError::Pause)) | - Some(Err(WriteError::__Nonexhaustive)) => { - curl_sys::CURL_WRITEFUNC_PAUSE - } - None => !0, - } - }).unwrap_or(!0) -} - -extern fn easy_read_cb(ptr: *mut c_char, - size: size_t, - nmemb: size_t, - data: *mut c_void) -> size_t { - read_cb(ptr, size, nmemb, data, |buf| unsafe { - (*(data as *mut EasyData)).read.as_mut().map(|f| f(buf)) - }) -} - -extern fn transfer_read_cb(ptr: *mut c_char, - size: size_t, - nmemb: size_t, - data: *mut c_void) -> size_t { - read_cb(ptr, 
size, nmemb, data, |buf| unsafe { - (*(data as *mut TransferData)).read.as_mut().map(|f| f(buf)) - }) -} - -fn read_cb(ptr: *mut c_char, - size: size_t, - nmemb: size_t, - data: *mut c_void, - f: F) -> size_t - where F: FnOnce(&mut [u8]) -> Option> -{ - unsafe { - if data.is_null() { - return 0 - } - let input = slice::from_raw_parts_mut(ptr as *mut u8, - size * nmemb); - panic::catch(|| { - match f(input) { - Some(Ok(s)) => s, - Some(Err(ReadError::Pause)) => { - curl_sys::CURL_READFUNC_PAUSE - } - Some(Err(ReadError::__Nonexhaustive)) | - Some(Err(ReadError::Abort)) => { - curl_sys::CURL_READFUNC_ABORT - } - None => !0, - } - }).unwrap_or(!0) - } -} - -extern fn easy_seek_cb(data: *mut c_void, - offset: curl_sys::curl_off_t, - origin: c_int) -> c_int { - seek_cb(data, offset, origin, |s| unsafe { - (*(data as *mut EasyData)).seek.as_mut().map(|f| f(s)) - }) -} - -extern fn transfer_seek_cb(data: *mut c_void, - offset: curl_sys::curl_off_t, - origin: c_int) -> c_int { - seek_cb(data, offset, origin, |s| unsafe { - (*(data as *mut TransferData)).seek.as_mut().map(|f| f(s)) - }) -} - -fn seek_cb(data: *mut c_void, - offset: curl_sys::curl_off_t, - origin: c_int, - f: F) -> c_int - where F: FnOnce(SeekFrom) -> Option -{ - if data.is_null() { - return -1 - } - panic::catch(|| { - let from = if origin == libc::SEEK_SET { - SeekFrom::Start(offset as u64) - } else { - panic!("unknown origin from libcurl: {}", origin); - }; - match f(from) { - Some(to) => to as c_int, - None => -1, - } - }).unwrap_or(!0) -} - -extern fn easy_progress_cb(data: *mut c_void, - dltotal: c_double, - dlnow: c_double, - ultotal: c_double, - ulnow: c_double) -> c_int { - progress_cb(data, dltotal, dlnow, ultotal, ulnow, |a, b, c, d| unsafe { - (*(data as *mut EasyData)).progress.as_mut().map(|f| f(a, b, c, d)) - }) -} - -extern fn transfer_progress_cb(data: *mut c_void, - dltotal: c_double, - dlnow: c_double, - ultotal: c_double, - ulnow: c_double) -> c_int { - progress_cb(data, dltotal, dlnow, 
ultotal, ulnow, |a, b, c, d| unsafe { - (*(data as *mut TransferData)).progress.as_mut().map(|f| f(a, b, c, d)) - }) -} - -fn progress_cb(data: *mut c_void, - dltotal: c_double, - dlnow: c_double, - ultotal: c_double, - ulnow: c_double, - f: F) -> c_int - where F: FnOnce(f64, f64, f64, f64) -> Option, -{ - if data.is_null() { - return 0 - } - let keep_going = panic::catch(|| { - f(dltotal, dlnow, ultotal, ulnow).unwrap_or(false) - }).unwrap_or(false); - if keep_going { - 0 - } else { - 1 - } -} - -extern fn easy_ssl_ctx_cb(handle: *mut curl_sys::CURL, - ssl_ctx: *mut c_void, - data: *mut c_void) -> curl_sys::CURLcode { - - ssl_ctx_cb(handle, ssl_ctx, data, |ssl_ctx| unsafe { - match (*(data as *mut EasyData)).ssl_ctx.as_mut() { - Some(f) => f(ssl_ctx), - // If the callback isn't set we just tell CURL to - // continue. - None => Ok(()), - } - }) -} - -extern fn transfer_ssl_ctx_cb(handle: *mut curl_sys::CURL, - ssl_ctx: *mut c_void, - data: *mut c_void) -> curl_sys::CURLcode { - - ssl_ctx_cb(handle, ssl_ctx, data, |ssl_ctx| unsafe { - match (*(data as *mut TransferData)).ssl_ctx.as_mut() { - Some(f) => f(ssl_ctx), - // If the callback isn't set we just tell CURL to - // continue. - None => Ok(()), - } - }) -} - -// TODO: same thing as `debug_cb`: can we expose `handle`? -fn ssl_ctx_cb(_handle: *mut curl_sys::CURL, - ssl_ctx: *mut c_void, - data: *mut c_void, - f: F) -> curl_sys::CURLcode - where F: FnOnce(*mut c_void) -> Result<(), Error> -{ - if data.is_null() { - return curl_sys::CURLE_OK; - } - - let result = panic::catch(|| { - f(ssl_ctx) - }); - - match result { - Some(Ok(())) => curl_sys::CURLE_OK, - Some(Err(e)) => e.code(), - // Default to a generic SSL error in case of panic. This - // shouldn't really matter since the error should be - // propagated later on but better safe than sorry... 
- None => curl_sys::CURLE_SSL_CONNECT_ERROR, - } -} - -extern fn easy_debug_cb(handle: *mut curl_sys::CURL, - kind: curl_sys::curl_infotype, - data: *mut c_char, - size: size_t, - userptr: *mut c_void) -> c_int { - debug_cb(handle, kind, data, size, userptr, |a, b| unsafe { - (*(userptr as *mut EasyData)).debug.as_mut().map(|f| f(a, b)) - }) -} - -extern fn transfer_debug_cb(handle: *mut curl_sys::CURL, - kind: curl_sys::curl_infotype, - data: *mut c_char, - size: size_t, - userptr: *mut c_void) -> c_int { - debug_cb(handle, kind, data, size, userptr, |a, b| unsafe { - (*(userptr as *mut TransferData)).debug.as_mut().map(|f| f(a, b)) - }) -} - -// TODO: expose `handle`? is that safe? -fn debug_cb(_handle: *mut curl_sys::CURL, - kind: curl_sys::curl_infotype, - data: *mut c_char, - size: size_t, - userptr: *mut c_void, - f: F) -> c_int - where F: FnOnce(InfoType, &[u8]) -> Option<()> -{ - if userptr.is_null() { - return 0 - } - panic::catch(|| unsafe { - let data = slice::from_raw_parts(data as *const u8, size); - let kind = match kind { - curl_sys::CURLINFO_TEXT => InfoType::Text, - curl_sys::CURLINFO_HEADER_IN => InfoType::HeaderIn, - curl_sys::CURLINFO_HEADER_OUT => InfoType::HeaderOut, - curl_sys::CURLINFO_DATA_IN => InfoType::DataIn, - curl_sys::CURLINFO_DATA_OUT => InfoType::DataOut, - curl_sys::CURLINFO_SSL_DATA_IN => InfoType::SslDataIn, - curl_sys::CURLINFO_SSL_DATA_OUT => InfoType::SslDataOut, - _ => return, - }; - f(kind, data); - }); - return 0 -} - -extern fn easy_header_cb(buffer: *mut c_char, - size: size_t, - nitems: size_t, - userptr: *mut c_void) -> size_t { - header_cb(buffer, size, nitems, userptr, |buf| unsafe { - (*(userptr as *mut EasyData)).header.as_mut().map(|f| f(buf)) - }) -} - -extern fn transfer_header_cb(buffer: *mut c_char, - size: size_t, - nitems: size_t, - userptr: *mut c_void) -> size_t { - header_cb(buffer, size, nitems, userptr, |buf| unsafe { - (*(userptr as *mut TransferData)).header.as_mut().map(|f| f(buf)) - }) -} - -fn 
header_cb(buffer: *mut c_char, - size: size_t, - nitems: size_t, - userptr: *mut c_void, - f: F) -> size_t - where F: FnOnce(&[u8]) -> Option, -{ - if userptr.is_null() { - return size * nitems - } - let keep_going = panic::catch(|| unsafe { - let data = slice::from_raw_parts(buffer as *const u8, - size * nitems); - f(data).unwrap_or(false) - }).unwrap_or(false); - if keep_going { - size * nitems - } else { - !0 - } -} - -impl<'easy, 'data> Transfer<'easy, 'data> { - /// Same as `Easy::write_function`, just takes a non `'static` lifetime - /// corresponding to the lifetime of this transfer. - pub fn write_function(&mut self, f: F) -> Result<(), Error> - where F: FnMut(&[u8]) -> Result + 'data - { - self.data.write = Some(Box::new(f)); - unsafe { - self.easy.set_write_function(transfer_write_cb, - &*self.data as *const _ as *mut _) - } - } - - /// Same as `Easy::read_function`, just takes a non `'static` lifetime - /// corresponding to the lifetime of this transfer. - pub fn read_function(&mut self, f: F) -> Result<(), Error> - where F: FnMut(&mut [u8]) -> Result + 'data - { - self.data.read = Some(Box::new(f)); - unsafe { - self.easy.set_read_function(transfer_read_cb, - &*self.data as *const _ as *mut _) - } - } - - /// Same as `Easy::seek_function`, just takes a non `'static` lifetime - /// corresponding to the lifetime of this transfer. - pub fn seek_function(&mut self, f: F) -> Result<(), Error> - where F: FnMut(SeekFrom) -> SeekResult + 'data - { - self.data.seek = Some(Box::new(f)); - unsafe { - self.easy.set_seek_function(transfer_seek_cb, - &*self.data as *const _ as *mut _) - } - } - - /// Same as `Easy::progress_function`, just takes a non `'static` lifetime - /// corresponding to the lifetime of this transfer. 
- pub fn progress_function(&mut self, f: F) -> Result<(), Error> - where F: FnMut(f64, f64, f64, f64) -> bool + 'data - { - self.data.progress = Some(Box::new(f)); - unsafe { - self.easy.set_progress_function(transfer_progress_cb, - &*self.data as *const _ as *mut _) - } - } - - /// Same as `Easy::ssl_ctx_function`, just takes a non `'static` - /// lifetime corresponding to the lifetime of this transfer. - pub fn ssl_ctx_function(&mut self, f: F) -> Result<(), Error> - where F: FnMut(*mut c_void) -> Result<(), Error> + Send + 'data - { - self.data.ssl_ctx = Some(Box::new(f)); - unsafe { - self.easy.set_ssl_ctx_function(transfer_ssl_ctx_cb, - &*self.data as *const _ as *mut _) - } - } - - /// Same as `Easy::debug_function`, just takes a non `'static` lifetime - /// corresponding to the lifetime of this transfer. - pub fn debug_function(&mut self, f: F) -> Result<(), Error> - where F: FnMut(InfoType, &[u8]) + 'data - { - self.data.debug = Some(Box::new(f)); - unsafe { - self.easy.set_debug_function(transfer_debug_cb, - &*self.data as *const _ as *mut _) - } - } - - /// Same as `Easy::header_function`, just takes a non `'static` lifetime - /// corresponding to the lifetime of this transfer. - pub fn header_function(&mut self, f: F) -> Result<(), Error> - where F: FnMut(&[u8]) -> bool + 'data - { - self.data.header = Some(Box::new(f)); - unsafe { - self.easy.set_header_function(transfer_header_cb, - &*self.data as *const _ as *mut _) - } - } - - // TODO: need to figure out how to expose this, but it also needs to be - // reset as part of `reset_scoped_configuration` above. Unfortunately - // setting `CURLOPT_POSTFIELDS` to null will switch the request to - // POST, which is not what we want. - // - // /// Configures the data that will be uploaded as part of a POST. - // /// - // /// By default this option is not set and corresponds to - // /// `CURLOPT_POSTFIELDS`. 
- // pub fn post_fields(&mut self, data: &'data [u8]) -> Result<(), Error> { - // // Set the length before the pointer so libcurl knows how much to read - // try!(self.easy.post_field_size(data.len() as u64)); - // self.easy.setopt_ptr(curl_sys::CURLOPT_POSTFIELDS, - // data.as_ptr() as *const _) - // } - - /// Same as `Easy::transfer`. - pub fn perform(&self) -> Result<(), Error> { - self.easy.do_perform() - } - - /// Same as `Easy::unpause_read`. - pub fn unpause_read(&self) -> Result<(), Error> { - self.easy.unpause_read() - } - - /// Same as `Easy::unpause_write` - pub fn unpause_write(&self) -> Result<(), Error> { - self.easy.unpause_write() - } -} - -fn default_configure(handle: &mut Easy) { - handle.data.error_buf = RefCell::new(vec![0; curl_sys::CURL_ERROR_SIZE]); - handle.setopt_ptr(curl_sys::CURLOPT_ERRORBUFFER, - handle.data.error_buf.borrow().as_ptr() as *const _) - .expect("failed to set error buffer"); - let _ = handle.signal(false); - ssl_configure(handle); -} - -#[cfg(all(unix, not(target_os = "macos")))] -fn ssl_configure(handle: &mut Easy) { - let probe = ::openssl_probe::probe(); - if let Some(ref path) = probe.cert_file { - let _ = handle.cainfo(path); - } - if let Some(ref path) = probe.cert_dir { - let _ = handle.capath(path); - } -} - -#[cfg(not(all(unix, not(target_os = "macos"))))] -fn ssl_configure(_handle: &mut Easy) {} - -impl Drop for Easy { - fn drop(&mut self) { - unsafe { - curl_sys::curl_easy_cleanup(self.handle); - } - } -} - -impl List { - /// Creates a new empty list of strings. - pub fn new() -> List { - List { raw: 0 as *mut _ } - } - - /// Appends some data into this list. - pub fn append(&mut self, data: &str) -> Result<(), Error> { - let data = try!(CString::new(data)); - unsafe { - let raw = curl_sys::curl_slist_append(self.raw, data.as_ptr()); - assert!(!raw.is_null()); - self.raw = raw; - Ok(()) - } - } - - /// Returns an iterator over the nodes in this list. 
- pub fn iter(&self) -> Iter { - Iter { _me: self, cur: self.raw } - } -} - -impl Drop for List { - fn drop(&mut self) { - unsafe { - curl_sys::curl_slist_free_all(self.raw) - } - } -} - -impl<'a> Iterator for Iter<'a> { - type Item = &'a [u8]; - - fn next(&mut self) -> Option<&'a [u8]> { - if self.cur.is_null() { - return None - } - - unsafe { - let ret = Some(CStr::from_ptr((*self.cur).data).to_bytes()); - self.cur = (*self.cur).next; - return ret - } - } -} - -impl Form { - /// Creates a new blank form ready for the addition of new data. - pub fn new() -> Form { - Form { - head: 0 as *mut _, - tail: 0 as *mut _, - headers: Vec::new(), - buffers: Vec::new(), - strings: Vec::new(), - } - } - - /// Prepares adding a new part to this `Form` - /// - /// Note that the part is not actually added to the form until the `add` - /// method is called on `Part`, which may or may not fail. - pub fn part<'a, 'data>(&'a mut self, name: &'data str) -> Part<'a, 'data> { - Part { - error: None, - form: self, - name: name, - array: vec![curl_sys::curl_forms { - option: curl_sys::CURLFORM_END, - value: 0 as *mut _, - }], - } - } -} - -impl Drop for Form { - fn drop(&mut self) { - unsafe { - curl_sys::curl_formfree(self.head); - } - } -} - -impl<'form, 'data> Part<'form, 'data> { - /// A pointer to the contents of this part, the actual data to send away. - pub fn contents(&mut self, contents: &'data [u8]) -> &mut Self { - let pos = self.array.len() - 1; - self.array.insert(pos, curl_sys::curl_forms { - option: curl_sys::CURLFORM_COPYCONTENTS, - value: contents.as_ptr() as *mut _, - }); - self.array.insert(pos + 1, curl_sys::curl_forms { - option: curl_sys::CURLFORM_CONTENTSLENGTH, - value: contents.len() as *mut _, - }); - self - } - - /// Causes this file to be read and its contents used as data in this part - /// - /// This part does not automatically become a file upload part simply - /// because its data was read from a file. 
- /// - /// # Errors - /// - /// If the filename has any internal nul bytes or if on Windows it does not - /// contain a unicode filename then the `add` function will eventually - /// return an error. - pub fn file_content

(&mut self, file: P) -> &mut Self - where P: AsRef - { - self._file_content(file.as_ref()) - } - - fn _file_content(&mut self, file: &Path) -> &mut Self { - if let Some(bytes) = self.path2cstr(file) { - let pos = self.array.len() - 1; - self.array.insert(pos, curl_sys::curl_forms { - option: curl_sys::CURLFORM_FILECONTENT, - value: bytes.as_ptr() as *mut _, - }); - self.form.strings.push(bytes); - } - self - } - - /// Makes this part a file upload part of the given file. - /// - /// Sets the filename field to the basename of the provided file name, and - /// it reads the contents of the file and passes them as data and sets the - /// content type if the given file matches one of the internally known file - /// extensions. - /// - /// The given upload file must exist entirely on the filesystem before the - /// upload is started because libcurl needs to read the size of it - /// beforehand. - /// - /// Multiple files can be uploaded by calling this method multiple times and - /// content types can also be configured for each file (by calling that - /// next). - /// - /// # Errors - /// - /// If the filename has any internal nul bytes or if on Windows it does not - /// contain a unicode filename then this function will cause `add` to return - /// an error when called. - pub fn file(&mut self, file: &'data P) -> &mut Self - where P: AsRef - { - self._file(file.as_ref()) - } - - fn _file(&mut self, file: &'data Path) -> &mut Self { - if let Some(bytes) = self.path2cstr(file) { - let pos = self.array.len() - 1; - self.array.insert(pos, curl_sys::curl_forms { - option: curl_sys::CURLFORM_FILE, - value: bytes.as_ptr() as *mut _, - }); - self.form.strings.push(bytes); - } - self - } - - /// Used in combination with `Part::file`, provides the content-type for - /// this part, possibly instead of choosing an internal one. - /// - /// # Panics - /// - /// This function will panic if `content_type` contains an internal nul - /// byte. 
- pub fn content_type(&mut self, content_type: &'data str) -> &mut Self { - if let Some(bytes) = self.bytes2cstr(content_type.as_bytes()) { - let pos = self.array.len() - 1; - self.array.insert(pos, curl_sys::curl_forms { - option: curl_sys::CURLFORM_CONTENTTYPE, - value: bytes.as_ptr() as *mut _, - }); - self.form.strings.push(bytes); - } - self - } - - /// Used in combination with `Part::file`, provides the filename for - /// this part instead of the actual one. - /// - /// # Errors - /// - /// If `name` contains an internal nul byte, or if on Windows the path is - /// not valid unicode then this function will return an error when `add` is - /// called. - pub fn filename(&mut self, name: &'data P) -> &mut Self - where P: AsRef - { - self._filename(name.as_ref()) - } - - fn _filename(&mut self, name: &'data Path) -> &mut Self { - if let Some(bytes) = self.path2cstr(name) { - let pos = self.array.len() - 1; - self.array.insert(pos, curl_sys::curl_forms { - option: curl_sys::CURLFORM_FILENAME, - value: bytes.as_ptr() as *mut _, - }); - self.form.strings.push(bytes); - } - self - } - - /// This is used to provide a custom file upload part without using the - /// `file` method above. - /// - /// The first parameter is for the filename field and the second is the - /// in-memory contents. - /// - /// # Errors - /// - /// If `name` contains an internal nul byte, or if on Windows the path is - /// not valid unicode then this function will return an error when `add` is - /// called. 
- pub fn buffer(&mut self, name: &'data P, data: Vec) - -> &mut Self - where P: AsRef - { - self._buffer(name.as_ref(), data) - } - - fn _buffer(&mut self, name: &'data Path, data: Vec) -> &mut Self { - if let Some(bytes) = self.path2cstr(name) { - let pos = self.array.len() - 1; - self.array.insert(pos, curl_sys::curl_forms { - option: curl_sys::CURLFORM_BUFFER, - value: bytes.as_ptr() as *mut _, - }); - self.form.strings.push(bytes); - self.array.insert(pos + 1, curl_sys::curl_forms { - option: curl_sys::CURLFORM_BUFFERPTR, - value: data.as_ptr() as *mut _, - }); - self.array.insert(pos + 2, curl_sys::curl_forms { - option: curl_sys::CURLFORM_BUFFERLENGTH, - value: data.len() as *mut _, - }); - self.form.buffers.push(data); - } - self - } - - /// Specifies extra headers for the form POST section. - /// - /// Appends the list of headers to those libcurl automatically generates. - pub fn content_header(&mut self, headers: List) -> &mut Self { - let pos = self.array.len() - 1; - self.array.insert(pos, curl_sys::curl_forms { - option: curl_sys::CURLFORM_CONTENTHEADER, - value: headers.raw as *mut _, - }); - self.form.headers.push(headers); - self - } - - /// Attempts to add this part to the `Form` that it was created from. - /// - /// If any error happens while adding that error is returned, otherwise if - /// the part was successfully appended then `Ok(())` is returned. 
- pub fn add(&mut self) -> Result<(), FormError> { - if let Some(err) = self.error.clone() { - return Err(err) - } - let rc = unsafe { - curl_sys::curl_formadd(&mut self.form.head, - &mut self.form.tail, - curl_sys::CURLFORM_COPYNAME, - self.name.as_ptr(), - curl_sys::CURLFORM_NAMELENGTH, - self.name.len(), - curl_sys::CURLFORM_ARRAY, - self.array.as_ptr(), - curl_sys::CURLFORM_END) - }; - if rc == curl_sys::CURL_FORMADD_OK { - Ok(()) - } else { - Err(FormError::new(rc)) - } - } - - #[cfg(unix)] - fn path2cstr(&mut self, p: &Path) -> Option { - use std::os::unix::prelude::*; - self.bytes2cstr(p.as_os_str().as_bytes()) - } - - #[cfg(windows)] - fn path2cstr(&mut self, p: &Path) -> Option { - match p.to_str() { - Some(bytes) => self.bytes2cstr(bytes.as_bytes()), - None if self.error.is_none() => { - // TODO: better error code - self.error = Some(FormError::new(curl_sys::CURL_FORMADD_INCOMPLETE)); - None - } - None => None, - } - } - - fn bytes2cstr(&mut self, bytes: &[u8]) -> Option { - match CString::new(bytes) { - Ok(c) => Some(c), - Err(..) if self.error.is_none() => { - // TODO: better error code - self.error = Some(FormError::new(curl_sys::CURL_FORMADD_INCOMPLETE)); - None - } - Err(..) => None, - } - } -} - -impl Auth { - /// Creates a new set of authentications with no members. - /// - /// An `Auth` structure is used to configure which forms of authentication - /// are attempted when negotiating connections with servers. - pub fn new() -> Auth { - Auth { bits: 0 } - } - - /// HTTP Basic authentication. - /// - /// This is the default choice, and the only method that is in wide-spread - /// use and supported virtually everywhere. This sends the user name and - /// password over the network in plain text, easily captured by others. - pub fn basic(&mut self, on: bool) -> &mut Auth { - self.flag(curl_sys::CURLAUTH_BASIC, on) - } - - /// HTTP Digest authentication. 
- /// - /// Digest authentication is defined in RFC 2617 and is a more secure way to - /// do authentication over public networks than the regular old-fashioned - /// Basic method. - pub fn digest(&mut self, on: bool) -> &mut Auth { - self.flag(curl_sys::CURLAUTH_DIGEST, on) - } - - /// HTTP Digest authentication with an IE flavor. - /// - /// Digest authentication is defined in RFC 2617 and is a more secure way to - /// do authentication over public networks than the regular old-fashioned - /// Basic method. The IE flavor is simply that libcurl will use a special - /// "quirk" that IE is known to have used before version 7 and that some - /// servers require the client to use. - pub fn digest_ie(&mut self, on: bool) -> &mut Auth { - self.flag(curl_sys::CURLAUTH_DIGEST_IE, on) - } - - /// HTTP Negotiate (SPNEGO) authentication. - /// - /// Negotiate authentication is defined in RFC 4559 and is the most secure - /// way to perform authentication over HTTP. - /// - /// You need to build libcurl with a suitable GSS-API library or SSPI on - /// Windows for this to work. - pub fn gssnegotiate(&mut self, on: bool) -> &mut Auth { - self.flag(curl_sys::CURLAUTH_GSSNEGOTIATE, on) - } - - /// HTTP NTLM authentication. - /// - /// A proprietary protocol invented and used by Microsoft. It uses a - /// challenge-response and hash concept similar to Digest, to prevent the - /// password from being eavesdropped. - /// - /// You need to build libcurl with either OpenSSL, GnuTLS or NSS support for - /// this option to work, or build libcurl on Windows with SSPI support. - pub fn ntlm(&mut self, on: bool) -> &mut Auth { - self.flag(curl_sys::CURLAUTH_NTLM, on) - } - - /// NTLM delegating to winbind helper. - /// - /// Authentication is performed by a separate binary application that is - /// executed when needed. 
The name of the application is specified at - /// compile time but is typically /usr/bin/ntlm_auth - /// - /// Note that libcurl will fork when necessary to run the winbind - /// application and kill it when complete, calling waitpid() to await its - /// exit when done. On POSIX operating systems, killing the process will - /// cause a SIGCHLD signal to be raised (regardless of whether - /// CURLOPT_NOSIGNAL is set), which must be handled intelligently by the - /// application. In particular, the application must not unconditionally - /// call wait() in its SIGCHLD signal handler to avoid being subject to a - /// race condition. This behavior is subject to change in future versions of - /// libcurl. - /// - /// A proprietary protocol invented and used by Microsoft. It uses a - /// challenge-response and hash concept similar to Digest, to prevent the - /// password from being eavesdropped. - pub fn ntlm_wb(&mut self, on: bool) -> &mut Auth { - self.flag(curl_sys::CURLAUTH_NTLM_WB, on) - } - - fn flag(&mut self, bit: c_ulong, on: bool) -> &mut Auth { - if on { - self.bits |= bit as c_long; - } else { - self.bits &= !bit as c_long; - } - self - } -} diff -Nru cargo-0.17.0/vendor/curl-0.4.1/src/error.rs cargo-0.19.0/vendor/curl-0.4.1/src/error.rs --- cargo-0.17.0/vendor/curl-0.4.1/src/error.rs 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/curl-0.4.1/src/error.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,598 +0,0 @@ -use std::error; -use std::ffi::{self, CStr}; -use std::fmt; -use std::str; -use std::io; - -use curl_sys; - -/// An error returned from various "easy" operations. -/// -/// This structure wraps a `CURLcode`. -#[derive(Clone, PartialEq)] -pub struct Error { - code: curl_sys::CURLcode, - extra: Option>, -} - -pub fn error_with_extra(code: curl_sys::CURLcode, extra: Box) -> Error { - Error { - code: code, - extra: Some(extra), - } -} - -impl Error { - /// Creates a new error from the underlying code returned by libcurl. 
- pub fn new(code: curl_sys::CURLcode) -> Error { - Error { - code: code, - extra: None, - } - } - - /// Returns whether this error corresponds to CURLE_UNSUPPORTED_PROTOCOL. - pub fn is_unsupported_protocol(&self) -> bool { - self.code == curl_sys::CURLE_UNSUPPORTED_PROTOCOL - } - - /// Returns whether this error corresponds to CURLE_FAILED_INIT. - pub fn is_failed_init(&self) -> bool { - self.code == curl_sys::CURLE_FAILED_INIT - } - - /// Returns whether this error corresponds to CURLE_URL_MALFORMAT. - pub fn is_url_malformed(&self) -> bool { - self.code == curl_sys::CURLE_URL_MALFORMAT - } - - // /// Returns whether this error corresponds to CURLE_NOT_BUILT_IN. - // pub fn is_not_built_in(&self) -> bool { - // self.code == curl_sys::CURLE_NOT_BUILT_IN - // } - - /// Returns whether this error corresponds to CURLE_COULDNT_RESOLVE_PROXY. - pub fn is_couldnt_resolve_proxy(&self) -> bool { - self.code == curl_sys::CURLE_COULDNT_RESOLVE_PROXY - } - - /// Returns whether this error corresponds to CURLE_COULDNT_RESOLVE_HOST. - pub fn is_couldnt_resolve_host(&self) -> bool { - self.code == curl_sys::CURLE_COULDNT_RESOLVE_HOST - } - - /// Returns whether this error corresponds to CURLE_COULDNT_CONNECT. - pub fn is_couldnt_connect(&self) -> bool { - self.code == curl_sys::CURLE_COULDNT_CONNECT - } - - /// Returns whether this error corresponds to CURLE_REMOTE_ACCESS_DENIED. - pub fn is_remote_access_denied(&self) -> bool { - self.code == curl_sys::CURLE_REMOTE_ACCESS_DENIED - } - - /// Returns whether this error corresponds to CURLE_PARTIAL_FILE. - pub fn is_partial_file(&self) -> bool { - self.code == curl_sys::CURLE_PARTIAL_FILE - } - - /// Returns whether this error corresponds to CURLE_QUOTE_ERROR. - pub fn is_quote_error(&self) -> bool { - self.code == curl_sys::CURLE_QUOTE_ERROR - } - - /// Returns whether this error corresponds to CURLE_HTTP_RETURNED_ERROR. 
- pub fn is_http_returned_error(&self) -> bool { - self.code == curl_sys::CURLE_HTTP_RETURNED_ERROR - } - - /// Returns whether this error corresponds to CURLE_READ_ERROR. - pub fn is_read_error(&self) -> bool { - self.code == curl_sys::CURLE_READ_ERROR - } - - /// Returns whether this error corresponds to CURLE_WRITE_ERROR. - pub fn is_write_error(&self) -> bool { - self.code == curl_sys::CURLE_WRITE_ERROR - } - - /// Returns whether this error corresponds to CURLE_UPLOAD_FAILED. - pub fn is_upload_failed(&self) -> bool { - self.code == curl_sys::CURLE_UPLOAD_FAILED - } - - /// Returns whether this error corresponds to CURLE_OUT_OF_MEMORY. - pub fn is_out_of_memory(&self) -> bool { - self.code == curl_sys::CURLE_OUT_OF_MEMORY - } - - /// Returns whether this error corresponds to CURLE_OPERATION_TIMEDOUT. - pub fn is_operation_timedout(&self) -> bool { - self.code == curl_sys::CURLE_OPERATION_TIMEDOUT - } - - /// Returns whether this error corresponds to CURLE_RANGE_ERROR. - pub fn is_range_error(&self) -> bool { - self.code == curl_sys::CURLE_RANGE_ERROR - } - - /// Returns whether this error corresponds to CURLE_HTTP_POST_ERROR. - pub fn is_http_post_error(&self) -> bool { - self.code == curl_sys::CURLE_HTTP_POST_ERROR - } - - /// Returns whether this error corresponds to CURLE_SSL_CONNECT_ERROR. - pub fn is_ssl_connect_error(&self) -> bool { - self.code == curl_sys::CURLE_SSL_CONNECT_ERROR - } - - /// Returns whether this error corresponds to CURLE_BAD_DOWNLOAD_RESUME. - pub fn is_bad_download_resume(&self) -> bool { - self.code == curl_sys::CURLE_BAD_DOWNLOAD_RESUME - } - - /// Returns whether this error corresponds to CURLE_FILE_COULDNT_READ_FILE. - pub fn is_file_couldnt_read_file(&self) -> bool { - self.code == curl_sys::CURLE_FILE_COULDNT_READ_FILE - } - - /// Returns whether this error corresponds to CURLE_FUNCTION_NOT_FOUND. 
- pub fn is_function_not_found(&self) -> bool { - self.code == curl_sys::CURLE_FUNCTION_NOT_FOUND - } - - /// Returns whether this error corresponds to CURLE_ABORTED_BY_CALLBACK. - pub fn is_aborted_by_callback(&self) -> bool { - self.code == curl_sys::CURLE_ABORTED_BY_CALLBACK - } - - /// Returns whether this error corresponds to CURLE_BAD_FUNCTION_ARGUMENT. - pub fn is_bad_function_argument(&self) -> bool { - self.code == curl_sys::CURLE_BAD_FUNCTION_ARGUMENT - } - - /// Returns whether this error corresponds to CURLE_INTERFACE_FAILED. - pub fn is_interface_failed(&self) -> bool { - self.code == curl_sys::CURLE_INTERFACE_FAILED - } - - /// Returns whether this error corresponds to CURLE_TOO_MANY_REDIRECTS. - pub fn is_too_many_redirects(&self) -> bool { - self.code == curl_sys::CURLE_TOO_MANY_REDIRECTS - } - - /// Returns whether this error corresponds to CURLE_UNKNOWN_OPTION. - pub fn is_unknown_option(&self) -> bool { - self.code == curl_sys::CURLE_UNKNOWN_OPTION - } - - /// Returns whether this error corresponds to CURLE_PEER_FAILED_VERIFICATION. - pub fn is_peer_failed_verification(&self) -> bool { - self.code == curl_sys::CURLE_PEER_FAILED_VERIFICATION - } - - /// Returns whether this error corresponds to CURLE_GOT_NOTHING. - pub fn is_got_nothing(&self) -> bool { - self.code == curl_sys::CURLE_GOT_NOTHING - } - - /// Returns whether this error corresponds to CURLE_SSL_ENGINE_NOTFOUND. - pub fn is_ssl_engine_notfound(&self) -> bool { - self.code == curl_sys::CURLE_SSL_ENGINE_NOTFOUND - } - - /// Returns whether this error corresponds to CURLE_SSL_ENGINE_SETFAILED. - pub fn is_ssl_engine_setfailed(&self) -> bool { - self.code == curl_sys::CURLE_SSL_ENGINE_SETFAILED - } - - /// Returns whether this error corresponds to CURLE_SEND_ERROR. - pub fn is_send_error(&self) -> bool { - self.code == curl_sys::CURLE_SEND_ERROR - } - - /// Returns whether this error corresponds to CURLE_RECV_ERROR. 
- pub fn is_recv_error(&self) -> bool { - self.code == curl_sys::CURLE_RECV_ERROR - } - - /// Returns whether this error corresponds to CURLE_SSL_CERTPROBLEM. - pub fn is_ssl_certproblem(&self) -> bool { - self.code == curl_sys::CURLE_SSL_CERTPROBLEM - } - - /// Returns whether this error corresponds to CURLE_SSL_CIPHER. - pub fn is_ssl_cipher(&self) -> bool { - self.code == curl_sys::CURLE_SSL_CIPHER - } - - /// Returns whether this error corresponds to CURLE_SSL_CACERT. - pub fn is_ssl_cacert(&self) -> bool { - self.code == curl_sys::CURLE_SSL_CACERT - } - - /// Returns whether this error corresponds to CURLE_BAD_CONTENT_ENCODING. - pub fn is_bad_content_encoding(&self) -> bool { - self.code == curl_sys::CURLE_BAD_CONTENT_ENCODING - } - - /// Returns whether this error corresponds to CURLE_FILESIZE_EXCEEDED. - pub fn is_filesize_exceeded(&self) -> bool { - self.code == curl_sys::CURLE_FILESIZE_EXCEEDED - } - - /// Returns whether this error corresponds to CURLE_USE_SSL_FAILED. - pub fn is_use_ssl_failed(&self) -> bool { - self.code == curl_sys::CURLE_USE_SSL_FAILED - } - - /// Returns whether this error corresponds to CURLE_SEND_FAIL_REWIND. - pub fn is_send_fail_rewind(&self) -> bool { - self.code == curl_sys::CURLE_SEND_FAIL_REWIND - } - - /// Returns whether this error corresponds to CURLE_SSL_ENGINE_INITFAILED. - pub fn is_ssl_engine_initfailed(&self) -> bool { - self.code == curl_sys::CURLE_SSL_ENGINE_INITFAILED - } - - /// Returns whether this error corresponds to CURLE_LOGIN_DENIED. - pub fn is_login_denied(&self) -> bool { - self.code == curl_sys::CURLE_LOGIN_DENIED - } - - /// Returns whether this error corresponds to CURLE_CONV_FAILED. - pub fn is_conv_failed(&self) -> bool { - self.code == curl_sys::CURLE_CONV_FAILED - } - - /// Returns whether this error corresponds to CURLE_CONV_REQD. - pub fn is_conv_required(&self) -> bool { - self.code == curl_sys::CURLE_CONV_REQD - } - - /// Returns whether this error corresponds to CURLE_SSL_CACERT_BADFILE. 
- pub fn is_ssl_cacert_badfile(&self) -> bool { - self.code == curl_sys::CURLE_SSL_CACERT_BADFILE - } - - /// Returns whether this error corresponds to CURLE_SSL_CRL_BADFILE. - pub fn is_ssl_crl_badfile(&self) -> bool { - self.code == curl_sys::CURLE_SSL_CRL_BADFILE - } - - /// Returns whether this error corresponds to CURLE_SSL_SHUTDOWN_FAILED. - pub fn is_ssl_shutdown_failed(&self) -> bool { - self.code == curl_sys::CURLE_SSL_SHUTDOWN_FAILED - } - - /// Returns whether this error corresponds to CURLE_AGAIN. - pub fn is_again(&self) -> bool { - self.code == curl_sys::CURLE_AGAIN - } - - /// Returns whether this error corresponds to CURLE_SSL_ISSUER_ERROR. - pub fn is_ssl_issuer_error(&self) -> bool { - self.code == curl_sys::CURLE_SSL_ISSUER_ERROR - } - - /// Returns whether this error corresponds to CURLE_CHUNK_FAILED. - pub fn is_chunk_failed(&self) -> bool { - self.code == curl_sys::CURLE_CHUNK_FAILED - } - - // /// Returns whether this error corresponds to CURLE_NO_CONNECTION_AVAILABLE. - // pub fn is_no_connection_available(&self) -> bool { - // self.code == curl_sys::CURLE_NO_CONNECTION_AVAILABLE - // } - - /// Returns the value of the underlying error corresponding to libcurl. - pub fn code(&self) -> curl_sys::CURLcode { - self.code - } - - /// Returns the extra description of this error, if any is available. 
- pub fn extra_description(&self) -> Option<&str> { - self.extra.as_ref().map(|s| &**s) - } -} - -impl fmt::Display for Error { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - let desc = error::Error::description(self); - match self.extra { - Some(ref s) => write!(f, "[{}] {} ({})", self.code(), desc, s), - None => write!(f, "[{}] {}", self.code(), desc), - } - } -} - -impl fmt::Debug for Error { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.debug_struct("Error") - .field("description", &error::Error::description(self)) - .field("code", &self.code) - .field("extra", &self.extra) - .finish() - } -} - -impl error::Error for Error { - fn description(&self) -> &str { - unsafe { - let s = curl_sys::curl_easy_strerror(self.code); - assert!(!s.is_null()); - str::from_utf8(CStr::from_ptr(s).to_bytes()).unwrap() - } - } -} - -/// An error returned from "share" operations. -/// -/// This structure wraps a `CURLSHcode`. -#[derive(Clone, PartialEq)] -pub struct ShareError { - code: curl_sys::CURLSHcode, -} - -impl ShareError { - /// Creates a new error from the underlying code returned by libcurl. - pub fn new(code: curl_sys::CURLSHcode) -> ShareError { - ShareError { code: code } - } - - /// Returns whether this error corresponds to CURLSHE_BAD_OPTION. - pub fn is_bad_option(&self) -> bool { - self.code == curl_sys::CURLSHE_BAD_OPTION - } - - /// Returns whether this error corresponds to CURLSHE_IN_USE. - pub fn is_in_use(&self) -> bool { - self.code == curl_sys::CURLSHE_IN_USE - } - - /// Returns whether this error corresponds to CURLSHE_INVALID. - pub fn is_invalid(&self) -> bool { - self.code == curl_sys::CURLSHE_INVALID - } - - /// Returns whether this error corresponds to CURLSHE_NOMEM. - pub fn is_nomem(&self) -> bool { - self.code == curl_sys::CURLSHE_NOMEM - } - - // /// Returns whether this error corresponds to CURLSHE_NOT_BUILT_IN. 
- // pub fn is_not_built_in(&self) -> bool { - // self.code == curl_sys::CURLSHE_NOT_BUILT_IN - // } - - /// Returns the value of the underlying error corresponding to libcurl. - pub fn code(&self) -> curl_sys::CURLSHcode { - self.code - } -} - -impl fmt::Display for ShareError { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - error::Error::description(self).fmt(f) - } -} - -impl fmt::Debug for ShareError { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "ShareError {{ description: {:?}, code: {} }}", - error::Error::description(self), - self.code) - } -} - -impl error::Error for ShareError { - fn description(&self) -> &str { - unsafe { - let s = curl_sys::curl_share_strerror(self.code); - assert!(!s.is_null()); - str::from_utf8(CStr::from_ptr(s).to_bytes()).unwrap() - } - } -} - -/// An error from "multi" operations. -/// -/// THis structure wraps a `CURLMcode`. -#[derive(Clone, PartialEq)] -pub struct MultiError { - code: curl_sys::CURLMcode, -} - -impl MultiError { - /// Creates a new error from the underlying code returned by libcurl. - pub fn new(code: curl_sys::CURLMcode) -> MultiError { - MultiError { code: code } - } - - /// Returns whether this error corresponds to CURLM_BAD_HANDLE. - pub fn is_bad_handle(&self) -> bool { - self.code == curl_sys::CURLM_BAD_HANDLE - } - - /// Returns whether this error corresponds to CURLM_BAD_EASY_HANDLE. - pub fn is_bad_easy_handle(&self) -> bool { - self.code == curl_sys::CURLM_BAD_EASY_HANDLE - } - - /// Returns whether this error corresponds to CURLM_OUT_OF_MEMORY. - pub fn is_out_of_memory(&self) -> bool { - self.code == curl_sys::CURLM_OUT_OF_MEMORY - } - - /// Returns whether this error corresponds to CURLM_INTERNAL_ERROR. - pub fn is_internal_error(&self) -> bool { - self.code == curl_sys::CURLM_INTERNAL_ERROR - } - - /// Returns whether this error corresponds to CURLM_BAD_SOCKET. 
- pub fn is_bad_socket(&self) -> bool { - self.code == curl_sys::CURLM_BAD_SOCKET - } - - /// Returns whether this error corresponds to CURLM_UNKNOWN_OPTION. - pub fn is_unknown_option(&self) -> bool { - self.code == curl_sys::CURLM_UNKNOWN_OPTION - } - - /// Returns whether this error corresponds to CURLM_CALL_MULTI_PERFORM. - pub fn is_call_perform(&self) -> bool { - self.code == curl_sys::CURLM_CALL_MULTI_PERFORM - } - - // /// Returns whether this error corresponds to CURLM_ADDED_ALREADY. - // pub fn is_added_already(&self) -> bool { - // self.code == curl_sys::CURLM_ADDED_ALREADY - // } - - /// Returns the value of the underlying error corresponding to libcurl. - pub fn code(&self) -> curl_sys::CURLMcode { - self.code - } -} - -impl fmt::Display for MultiError { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - error::Error::description(self).fmt(f) - } -} - -impl fmt::Debug for MultiError { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "MultiError {{ description: {:?}, code: {} }}", - error::Error::description(self), - self.code) - } -} - -impl error::Error for MultiError { - fn description(&self) -> &str { - unsafe { - let s = curl_sys::curl_multi_strerror(self.code); - assert!(!s.is_null()); - str::from_utf8(CStr::from_ptr(s).to_bytes()).unwrap() - } - } -} - - -/// An error from "form add" operations. -/// -/// THis structure wraps a `CURLFORMcode`. -#[derive(Clone, PartialEq)] -pub struct FormError { - code: curl_sys::CURLFORMcode, -} - -impl FormError { - /// Creates a new error from the underlying code returned by libcurl. - pub fn new(code: curl_sys::CURLFORMcode) -> FormError { - FormError { code: code } - } - - /// Returns whether this error corresponds to CURL_FORMADD_MEMORY. - pub fn is_memory(&self) -> bool { - self.code == curl_sys::CURL_FORMADD_MEMORY - } - - /// Returns whether this error corresponds to CURL_FORMADD_OPTION_TWICE. 
- pub fn is_option_twice(&self) -> bool { - self.code == curl_sys::CURL_FORMADD_OPTION_TWICE - } - - /// Returns whether this error corresponds to CURL_FORMADD_NULL. - pub fn is_null(&self) -> bool { - self.code == curl_sys::CURL_FORMADD_NULL - } - - /// Returns whether this error corresponds to CURL_FORMADD_UNKNOWN_OPTION. - pub fn is_unknown_option(&self) -> bool { - self.code == curl_sys::CURL_FORMADD_UNKNOWN_OPTION - } - - /// Returns whether this error corresponds to CURL_FORMADD_INCOMPLETE. - pub fn is_incomplete(&self) -> bool { - self.code == curl_sys::CURL_FORMADD_INCOMPLETE - } - - /// Returns whether this error corresponds to CURL_FORMADD_ILLEGAL_ARRAY. - pub fn is_illegal_array(&self) -> bool { - self.code == curl_sys::CURL_FORMADD_ILLEGAL_ARRAY - } - - /// Returns whether this error corresponds to CURL_FORMADD_DISABLED. - pub fn is_disabled(&self) -> bool { - self.code == curl_sys::CURL_FORMADD_DISABLED - } - - /// Returns the value of the underlying error corresponding to libcurl. 
- pub fn code(&self) -> curl_sys::CURLFORMcode { - self.code - } -} - -impl fmt::Display for FormError { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - error::Error::description(self).fmt(f) - } -} - -impl fmt::Debug for FormError { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "FormError {{ description: {:?}, code: {} }}", - error::Error::description(self), - self.code) - } -} - -impl error::Error for FormError { - fn description(&self) -> &str { - match self.code { - curl_sys::CURL_FORMADD_MEMORY => "allocation failure", - curl_sys::CURL_FORMADD_OPTION_TWICE => "one option passed twice", - curl_sys::CURL_FORMADD_NULL => "null pointer given for string", - curl_sys::CURL_FORMADD_UNKNOWN_OPTION => "unknown option", - curl_sys::CURL_FORMADD_INCOMPLETE => "form information not complete", - curl_sys::CURL_FORMADD_ILLEGAL_ARRAY => "illegal array in option", - curl_sys::CURL_FORMADD_DISABLED => { - "libcurl does not have support for this option compiled in" - } - _ => "unknown form error", - } - } -} - -impl From for Error { - fn from(_: ffi::NulError) -> Error { - Error { code: curl_sys::CURLE_CONV_FAILED, extra: None } - } -} - -impl From for io::Error { - fn from(e: Error) -> io::Error { - io::Error::new(io::ErrorKind::Other, e) - } -} - -impl From for io::Error { - fn from(e: ShareError) -> io::Error { - io::Error::new(io::ErrorKind::Other, e) - } -} - -impl From for io::Error { - fn from(e: MultiError) -> io::Error { - io::Error::new(io::ErrorKind::Other, e) - } -} - -impl From for io::Error { - fn from(e: FormError) -> io::Error { - io::Error::new(io::ErrorKind::Other, e) - } -} diff -Nru cargo-0.17.0/vendor/curl-0.4.1/src/lib.rs cargo-0.19.0/vendor/curl-0.4.1/src/lib.rs --- cargo-0.17.0/vendor/curl-0.4.1/src/lib.rs 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/curl-0.4.1/src/lib.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,116 +0,0 @@ -//! Rust bindings to the libcurl C library -//! -//! 
This crate contains bindings for an HTTP/HTTPS client which is powered by -//! [libcurl], the same library behind the `curl` command line tool. The API -//! currently closely matches that of libcurl itself, except that a Rustic layer -//! of safety is applied on top. -//! -//! [libcurl]: https://curl.haxx.se/libcurl/ -//! -//! # The "Easy" API -//! -//! The easiest way to send a request is to use the `Easy` api which corresponds -//! to `CURL` in libcurl. This handle supports a wide variety of options and can -//! be used to make a single blocking request in a thread. Callbacks can be -//! specified to deal with data as it arrives and a handle can be reused to -//! cache connections and such. -//! -//! ```rust,no_run -//! use std::io::{stdout, Write}; -//! -//! use curl::easy::Easy; -//! -//! // Write the contents of rust-lang.org to stdout -//! let mut easy = Easy::new(); -//! easy.url("https://www.rust-lang.org/").unwrap(); -//! easy.write_function(|data| { -//! Ok(stdout().write(data).unwrap()) -//! }).unwrap(); -//! easy.perform().unwrap(); -//! ``` -//! -//! # What about multiple concurrent HTTP requests? -//! -//! One option you have currently is to send multiple requests in multiple -//! threads, but otherwise libcurl has a "multi" interface for doing this -//! operation. Initial bindings of this interface can be found in the `multi` -//! module, but feedback is welcome! -//! -//! # Where does libcurl come from? -//! -//! This crate links to the `curl-sys` crate which is in turn responsible for -//! acquiring and linking to the libcurl library. Currently this crate will -//! build libcurl from source if one is not already detected on the system. -//! -//! There is a large number of releases for libcurl, all with different sets of -//! capabilities. Robust programs may wish to inspect `Version::get()` to test -//! what features are implemented in the linked build of libcurl at runtime. 
- -#![deny(missing_docs)] - -extern crate curl_sys; -extern crate libc; - -#[cfg(all(unix, not(target_os = "macos")))] -extern crate openssl_sys; -#[cfg(all(unix, not(target_os = "macos")))] -extern crate openssl_probe; -#[cfg(windows)] -extern crate winapi; - -use std::ffi::CStr; -use std::str; -use std::sync::{Once, ONCE_INIT}; - -pub use error::{Error, ShareError, MultiError, FormError}; -mod error; - -pub use version::{Version, Protocols}; -mod version; - -mod panic; -pub mod easy; -pub mod multi; - -/// Initializes the underlying libcurl library. -/// -/// It's not required to call this before the library is used, but it's -/// recommended to do so as soon as the program starts. -pub fn init() { - static INIT: Once = ONCE_INIT; - INIT.call_once(|| { - platform_init(); - unsafe { - assert_eq!(curl_sys::curl_global_init(curl_sys::CURL_GLOBAL_ALL), 0); - libc::atexit(cleanup); - } - }); - - extern fn cleanup() { - unsafe { curl_sys::curl_global_cleanup(); } - } - - #[cfg(all(unix, not(target_os = "macos")))] - fn platform_init() { - openssl_sys::init(); - } - - #[cfg(not(all(unix, not(target_os = "macos"))))] - fn platform_init() {} -} - -unsafe fn opt_str<'a>(ptr: *const libc::c_char) -> Option<&'a str> { - if ptr.is_null() { - None - } else { - Some(str::from_utf8(CStr::from_ptr(ptr).to_bytes()).unwrap()) - } -} - -fn cvt(r: curl_sys::CURLcode) -> Result<(), Error> { - if r == curl_sys::CURLE_OK { - Ok(()) - } else { - Err(Error::new(r)) - } -} diff -Nru cargo-0.17.0/vendor/curl-0.4.1/src/multi.rs cargo-0.19.0/vendor/curl-0.4.1/src/multi.rs --- cargo-0.17.0/vendor/curl-0.4.1/src/multi.rs 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/curl-0.4.1/src/multi.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,642 +0,0 @@ -//! 
Multi - initiating multiple requests simultaneously - -use std::marker; -use std::time::Duration; - -use libc::{c_int, c_char, c_void, c_long}; -use curl_sys; - -#[cfg(windows)] -use winapi::fd_set; -#[cfg(unix)] -use libc::fd_set; - -use {MultiError, Error}; -use easy::Easy; -use panic; - -/// A multi handle for initiating multiple connections simultaneously. -/// -/// This structure corresponds to `CURLM` in libcurl and provides the ability to -/// have multiple transfers in flight simultaneously. This handle is then used -/// to manage each transfer. The main purpose of a `CURLM` is for the -/// *application* to drive the I/O rather than libcurl itself doing all the -/// blocking. Methods like `action` allow the application to inform libcurl of -/// when events have happened. -/// -/// Lots more documentation can be found on the libcurl [multi tutorial] where -/// the APIs correspond pretty closely with this crate. -/// -/// [multi tutorial]: https://curl.haxx.se/libcurl/c/libcurl-multi.html -pub struct Multi { - raw: *mut curl_sys::CURLM, - data: Box, -} - -struct MultiData { - socket: Box, - timer: Box) -> bool + Send>, -} - -/// Message from the `messages` function of a multi handle. -/// -/// Currently only indicates whether a transfer is done. -pub struct Message<'multi> { - ptr: *mut curl_sys::CURLMsg, - _multi: &'multi Multi, -} - -/// Wrapper around an easy handle while it's owned by a multi handle. -/// -/// Once an easy handle has been added to a multi handle then it can no longer -/// be used via `perform`. This handle is also used to remove the easy handle -/// from the multi handle when desired. -pub struct EasyHandle { - easy: Easy, - // This is now effecitvely bound to a `Multi`, so it is no longer sendable. - _marker: marker::PhantomData<&'static Multi>, -} - -/// Notification of the events that have happened on a socket. 
-/// -/// This type is passed as an argument to the `action` method on a multi handle -/// to indicate what events have occurred on a socket. -pub struct Events { - bits: c_int, -} - -/// Notification of events that are requested on a socket. -/// -/// This type is yielded to the `socket_function` callback to indicate what -/// events are requested on a socket. -#[derive(Debug)] -pub struct SocketEvents { - bits: c_int, -} - -/// Raw underlying socket type that the multi handles use -pub type Socket = curl_sys::curl_socket_t; - -impl Multi { - /// Creates a new multi session through which multiple HTTP transfers can be - /// initiated. - pub fn new() -> Multi { - unsafe { - ::init(); - let ptr = curl_sys::curl_multi_init(); - assert!(!ptr.is_null()); - Multi { - raw: ptr, - data: Box::new(MultiData { - socket: Box::new(|_, _, _| ()), - timer: Box::new(|_| true), - }), - } - } - } - - /// Set the callback informed about what to wait for - /// - /// When the `action` function runs, it informs the application about - /// updates in the socket (file descriptor) status by doing none, one, or - /// multiple calls to the socket callback. The callback gets status updates - /// with changes since the previous time the callback was called. See - /// `action` for more details on how the callback is used and should work. - /// - /// The `SocketEvents` parameter informs the callback on the status of the - /// given socket, and the methods on that type can be used to learn about - /// what's going on with the socket. - /// - /// The third `usize` parameter is a custom value set by the `assign` method - /// below. 
- pub fn socket_function(&mut self, f: F) -> Result<(), MultiError> - where F: FnMut(Socket, SocketEvents, usize) + Send + 'static, - { - self._socket_function(Box::new(f)) - } - - fn _socket_function(&mut self, - f: Box) - -> Result<(), MultiError> - { - self.data.socket = f; - let cb: curl_sys::curl_socket_callback = cb; - try!(self.setopt_ptr(curl_sys::CURLMOPT_SOCKETFUNCTION, - cb as usize as *const c_char)); - let ptr = &*self.data as *const _; - try!(self.setopt_ptr(curl_sys::CURLMOPT_SOCKETDATA, - ptr as *const c_char)); - return Ok(()); - - // TODO: figure out how to expose `_easy` - extern fn cb(_easy: *mut curl_sys::CURL, - socket: curl_sys::curl_socket_t, - what: c_int, - userptr: *mut c_void, - socketp: *mut c_void) -> c_int { - panic::catch(|| unsafe { - let f = &mut (*(userptr as *mut MultiData)).socket; - f(socket, SocketEvents { bits: what }, socketp as usize) - }); - 0 - } - } - - /// Set data to associate with an internal socket - /// - /// This function creates an association in the multi handle between the - /// given socket and a private token of the application. This is designed - /// for `action` uses. - /// - /// When set, the token will be passed to all future socket callbacks for - /// the specified socket. - /// - /// If the given socket isn't already in use by libcurl, this function will - /// return an error. - /// - /// libcurl only keeps one single token associated with a socket, so - /// calling this function several times for the same socket will make the - /// last set token get used. - /// - /// The idea here being that this association (socket to token) is something - /// that just about every application that uses this API will need and then - /// libcurl can just as well do it since it already has an internal hash - /// table lookup for this. 
- /// - /// # Typical Usage - /// - /// In a typical application you allocate a struct or at least use some kind - /// of semi-dynamic data for each socket that we must wait for action on - /// when using the `action` approach. - /// - /// When our socket-callback gets called by libcurl and we get to know about - /// yet another socket to wait for, we can use `assign` to point out the - /// particular data so that when we get updates about this same socket - /// again, we don't have to find the struct associated with this socket by - /// ourselves. - pub fn assign(&self, - socket: Socket, - token: usize) -> Result<(), MultiError> { - unsafe { - try!(cvt(curl_sys::curl_multi_assign(self.raw, socket, - token as *mut _))); - Ok(()) - } - } - - /// Set callback to receive timeout values - /// - /// Certain features, such as timeouts and retries, require you to call - /// libcurl even when there is no activity on the file descriptors. - /// - /// Your callback function should install a non-repeating timer with the - /// interval specified. Each time that timer fires, call either `action` or - /// `perform`, depending on which interface you use. - /// - /// A timeout value of `None` means you should delete your timer. - /// - /// A timeout value of 0 means you should call `action` or `perform` (once) - /// as soon as possible. - /// - /// This callback will only be called when the timeout changes. - /// - /// The timer callback should return `true` on success, and `false` on - /// error. This callback can be used instead of, or in addition to, - /// `get_timeout`. 
- pub fn timer_function(&mut self, f: F) -> Result<(), MultiError> - where F: FnMut(Option) -> bool + Send + 'static, - { - self._timer_function(Box::new(f)) - } - - fn _timer_function(&mut self, - f: Box) -> bool + Send>) - -> Result<(), MultiError> - { - self.data.timer = f; - let cb: curl_sys::curl_multi_timer_callback = cb; - try!(self.setopt_ptr(curl_sys::CURLMOPT_TIMERFUNCTION, - cb as usize as *const c_char)); - let ptr = &*self.data as *const _; - try!(self.setopt_ptr(curl_sys::CURLMOPT_TIMERDATA, - ptr as *const c_char)); - return Ok(()); - - // TODO: figure out how to expose `_multi` - extern fn cb(_multi: *mut curl_sys::CURLM, - timeout_ms: c_long, - user: *mut c_void) -> c_int { - let keep_going = panic::catch(|| unsafe { - let f = &mut (*(user as *mut MultiData)).timer; - if timeout_ms == -1 { - f(None) - } else { - f(Some(Duration::from_millis(timeout_ms as u64))) - } - }).unwrap_or(false); - if keep_going {0} else {-1} - } - } - - fn setopt_ptr(&mut self, - opt: curl_sys::CURLMoption, - val: *const c_char) -> Result<(), MultiError> { - unsafe { - cvt(curl_sys::curl_multi_setopt(self.raw, opt, val)) - } - } - - /// Add an easy handle to a multi session - /// - /// Adds a standard easy handle to the multi stack. This function call will - /// make this multi handle control the specified easy handle. - /// - /// When an easy interface is added to a multi handle, it will use a shared - /// connection cache owned by the multi handle. Removing and adding new easy - /// handles will not affect the pool of connections or the ability to do - /// connection re-use. - /// - /// If you have `timer_function` set in the multi handle (and you really - /// should if you're working event-based with `action` and friends), that - /// callback will be called from within this function to ask for an updated - /// timer so that your main event loop will get the activity on this handle - /// to get started. 
- /// - /// The easy handle will remain added to the multi handle until you remove - /// it again with `remove` on the returned handle - even when a transfer - /// with that specific easy handle is completed. - pub fn add(&self, mut easy: Easy) -> Result { - // Clear any configuration set by previous transfers because we're - // moving this into a `Send+'static` situation now basically. - easy.transfer(); - - unsafe { - try!(cvt(curl_sys::curl_multi_add_handle(self.raw, easy.raw()))); - } - Ok(EasyHandle { - easy: easy, - _marker: marker::PhantomData, - }) - } - - /// Remove an easy handle from this multi session - /// - /// Removes the easy handle from this multi handle. This will make the - /// returned easy handle be removed from this multi handle's control. - /// - /// When the easy handle has been removed from a multi stack, it is again - /// perfectly legal to invoke `perform` on it. - /// - /// Removing an easy handle while being used is perfectly legal and will - /// effectively halt the transfer in progress involving that easy handle. - /// All other easy handles and transfers will remain unaffected. - pub fn remove(&self, easy: EasyHandle) -> Result { - unsafe { - try!(cvt(curl_sys::curl_multi_remove_handle(self.raw, - easy.easy.raw()))); - } - Ok(easy.easy) - } - - /// Read multi stack informationals - /// - /// Ask the multi handle if there are any messages/informationals from the - /// individual transfers. Messages may include informationals such as an - /// error code from the transfer or just the fact that a transfer is - /// completed. More details on these should be written down as well. 
- pub fn messages(&self, mut f: F) where F: FnMut(Message) { - self._messages(&mut f) - } - - fn _messages(&self, mut f: &mut FnMut(Message)) { - let mut queue = 0; - unsafe { - loop { - let ptr = curl_sys::curl_multi_info_read(self.raw, &mut queue); - if ptr.is_null() { - break - } - f(Message { ptr: ptr, _multi: self }) - } - } - } - - /// Inform of reads/writes available data given an action - /// - /// When the application has detected action on a socket handled by libcurl, - /// it should call this function with the sockfd argument set to - /// the socket with the action. When the events on a socket are known, they - /// can be passed `events`. When the events on a socket are unknown, pass - /// `Events::new()` instead, and libcurl will test the descriptor - /// internally. - /// - /// The returned integer will contain the number of running easy handles - /// within the multi handle. When this number reaches zero, all transfers - /// are complete/done. When you call `action` on a specific socket and the - /// counter decreases by one, it DOES NOT necessarily mean that this exact - /// socket/transfer is the one that completed. Use `messages` to figure out - /// which easy handle that completed. - /// - /// The `action` function informs the application about updates in the - /// socket (file descriptor) status by doing none, one, or multiple calls to - /// the socket callback function set with the `socket_function` method. They - /// update the status with changes since the previous time the callback was - /// called. - pub fn action(&self, socket: Socket, events: &Events) - -> Result { - let mut remaining = 0; - unsafe { - try!(cvt(curl_sys::curl_multi_socket_action(self.raw, - socket, - events.bits, - &mut remaining))); - Ok(remaining as u32) - } - } - - /// Inform libcurl that a timeout has expired and sockets should be tested. - /// - /// The returned integer will contain the number of running easy handles - /// within the multi handle. 
When this number reaches zero, all transfers - /// are complete/done. When you call `action` on a specific socket and the - /// counter decreases by one, it DOES NOT necessarily mean that this exact - /// socket/transfer is the one that completed. Use `messages` to figure out - /// which easy handle that completed. - /// - /// Get the timeout time by calling the `timer_function` method. Your - /// application will then get called with information on how long to wait - /// for socket actions at most before doing the timeout action: call the - /// `timeout` method. You can also use the `get_timeout` function to - /// poll the value at any given time, but for an event-based system using - /// the callback is far better than relying on polling the timeout value. - pub fn timeout(&self) -> Result { - let mut remaining = 0; - unsafe { - try!(cvt(curl_sys::curl_multi_socket_action(self.raw, - curl_sys::CURL_SOCKET_BAD, - 0, - &mut remaining))); - Ok(remaining as u32) - } - } - - /// Get how long to wait for action before proceeding - /// - /// An application using the libcurl multi interface should call - /// `get_timeout` to figure out how long it should wait for socket actions - - /// at most - before proceeding. - /// - /// Proceeding means either doing the socket-style timeout action: call the - /// `timeout` function, or call `perform` if you're using the simpler and - /// older multi interface approach. - /// - /// The timeout value returned is the duration at this very moment. If 0, it - /// means you should proceed immediately without waiting for anything. If it - /// returns `None`, there's no timeout at all set. - /// - /// Note: if libcurl returns a `None` timeout here, it just means that - /// libcurl currently has no stored timeout value. You must not wait too - /// long (more than a few seconds perhaps) before you call `perform` again. 
- pub fn get_timeout(&self) -> Result, MultiError> { - let mut ms = 0; - unsafe { - try!(cvt(curl_sys::curl_multi_timeout(self.raw, &mut ms))); - if ms == -1 { - Ok(None) - } else { - Ok(Some(Duration::from_millis(ms as u64))) - } - } - } - - /// Reads/writes available data from each easy handle. - /// - /// This function handles transfers on all the added handles that need - /// attention in an non-blocking fashion. - /// - /// When an application has found out there's data available for this handle - /// or a timeout has elapsed, the application should call this function to - /// read/write whatever there is to read or write right now etc. This - /// method returns as soon as the reads/writes are done. This function does - /// not require that there actually is any data available for reading or - /// that data can be written, it can be called just in case. It will return - /// the number of handles that still transfer data. - /// - /// If the amount of running handles is changed from the previous call (or - /// is less than the amount of easy handles you've added to the multi - /// handle), you know that there is one or more transfers less "running". - /// You can then call `info` to get information about each individual - /// completed transfer, and that returned info includes `Error` and more. - /// If an added handle fails very quickly, it may never be counted as a - /// running handle. - /// - /// When running_handles is set to zero (0) on the return of this function, - /// there is no longer any transfers in progress. - /// - /// # Return - /// - /// Before libcurl version 7.20.0: If you receive `is_call_perform`, this - /// basically means that you should call `perform` again, before you select - /// on more actions. You don't have to do it immediately, but the return - /// code means that libcurl may have more data available to return or that - /// there may be more data to send off before it is "satisfied". 
Do note - /// that `perform` will return `is_call_perform` only when it wants to be - /// called again immediately. When things are fine and there is nothing - /// immediate it wants done, it'll return `Ok` and you need to wait for - /// "action" and then call this function again. - /// - /// This function only returns errors etc regarding the whole multi stack. - /// Problems still might have occurred on individual transfers even when - /// this function returns `Ok`. Use `info` to figure out how individual - /// transfers did. - pub fn perform(&self) -> Result { - unsafe { - let mut ret = 0; - try!(cvt(curl_sys::curl_multi_perform(self.raw, &mut ret))); - Ok(ret as u32) - } - } - - /// Extracts file descriptor information from a multi handle - /// - /// This function extracts file descriptor information from a given - /// handle, and libcurl returns its `fd_set` sets. The application can use - /// these to `select()` on, but be sure to `FD_ZERO` them before calling - /// this function as curl_multi_fdset only adds its own descriptors, it - /// doesn't zero or otherwise remove any others. The curl_multi_perform - /// function should be called as soon as one of them is ready to be read - /// from or written to. - /// - /// If no file descriptors are set by libcurl, this function will return - /// `Ok(None)`. Otherwise `Ok(Some(n))` will be returned where `n` the - /// highest descriptor number libcurl set. When `Ok(None)` is returned it - /// is because libcurl currently does something that isn't possible for - /// your application to monitor with a socket and unfortunately you can - /// then not know exactly when the current action is completed using - /// `select()`. You then need to wait a while before you proceed and call - /// `perform` anyway. - /// - /// When doing `select()`, you should use `get_timeout` to figure out - /// how long to wait for action. 
Call `perform` even if no activity has - /// been seen on the `fd_set`s after the timeout expires as otherwise - /// internal retries and timeouts may not work as you'd think and want. - /// - /// If one of the sockets used by libcurl happens to be larger than what - /// can be set in an `fd_set`, which on POSIX systems means that the file - /// descriptor is larger than `FD_SETSIZE`, then libcurl will try to not - /// set it. Setting a too large file descriptor in an `fd_set` implies an out - /// of bounds write which can cause crashes, or worse. The effect of NOT - /// storing it will possibly save you from the crash, but will make your - /// program NOT wait for sockets it should wait for... - pub fn fdset(&self, - read: Option<&mut fd_set>, - write: Option<&mut fd_set>, - except: Option<&mut fd_set>) -> Result, MultiError> { - unsafe { - let mut ret = 0; - let read = read.map(|r| r as *mut _).unwrap_or(0 as *mut _); - let write = write.map(|r| r as *mut _).unwrap_or(0 as *mut _); - let except = except.map(|r| r as *mut _).unwrap_or(0 as *mut _); - try!(cvt(curl_sys::curl_multi_fdset(self.raw, read, write, except, - &mut ret))); - if ret == -1 { - Ok(None) - } else { - Ok(Some(ret)) - } - } - } - - /// Attempt to close the multi handle and clean up all associated resources. - /// - /// Cleans up and removes a whole multi stack. It does not free or touch any - /// individual easy handles in any way - they still need to be closed - /// individually. - pub fn close(&self) -> Result<(), MultiError> { - unsafe { - cvt(curl_sys::curl_multi_cleanup(self.raw)) - } - } -} - -fn cvt(code: curl_sys::CURLMcode) -> Result<(), MultiError> { - if code == curl_sys::CURLM_OK { - Ok(()) - } else { - Err(MultiError::new(code)) - } -} - -impl Drop for Multi { - fn drop(&mut self) { - let _ = self.close(); - } -} - -impl EasyHandle { - /// Sets an internal private token for this `EasyHandle`. 
- /// - /// This function will set the `CURLOPT_PRIVATE` field on the underlying - /// easy handle. - pub fn set_token(&mut self, token: usize) -> Result<(), Error> { - unsafe { - ::cvt(curl_sys::curl_easy_setopt(self.easy.raw(), - curl_sys::CURLOPT_PRIVATE, - token)) - } - } -} - -impl<'multi> Message<'multi> { - /// If this message indicates that a transfer has finished, returns the - /// result of the transfer in `Some`. - /// - /// If the message doesn't indicate that a transfer has finished, then - /// `None` is returned. - pub fn result(&self) -> Option> { - unsafe { - if (*self.ptr).msg == curl_sys::CURLMSG_DONE { - Some(::cvt((*self.ptr).data as curl_sys::CURLcode)) - } else { - None - } - } - } - - /// Returns whether this easy message was for the specified easy handle or - /// not. - pub fn is_for(&self, handle: &EasyHandle) -> bool { - unsafe { (*self.ptr).easy_handle == handle.easy.raw() } - } - - /// Returns the token associated with the easy handle that this message - /// represents a completion for. - /// - /// This function will return the token assigned with - /// `EasyHandle::set_token`. This reads the `CURLINFO_PRIVATE` field of the - /// underlying `*mut CURL`. - pub fn token(&self) -> Result { - unsafe { - let mut p = 0usize; - try!(::cvt(curl_sys::curl_easy_getinfo((*self.ptr).easy_handle, - curl_sys::CURLINFO_PRIVATE, - &mut p))); - Ok(p) - } - } -} - -impl Events { - /// Creates a new blank event bit mask. - pub fn new() -> Events { - Events { bits: 0 } - } - - /// Set or unset the whether these events indicate that input is ready. - pub fn input(&mut self, val: bool) -> &mut Events { - self.flag(curl_sys::CURL_CSELECT_IN, val) - } - - /// Set or unset the whether these events indicate that output is ready. - pub fn output(&mut self, val: bool) -> &mut Events { - self.flag(curl_sys::CURL_CSELECT_OUT, val) - } - - /// Set or unset the whether these events indicate that an error has - /// happened. 
- pub fn error(&mut self, val: bool) -> &mut Events { - self.flag(curl_sys::CURL_CSELECT_ERR, val) - } - - fn flag(&mut self, flag: c_int, val: bool) -> &mut Events { - if val { - self.bits |= flag; - } else { - self.bits &= !flag; - } - self - } -} - -impl SocketEvents { - /// Wait for incoming data. For the socket to become readable. - pub fn input(&self) -> bool { - self.bits & curl_sys::CURL_POLL_IN == curl_sys::CURL_POLL_IN - } - - /// Wait for outgoing data. For the socket to become writable. - pub fn output(&self) -> bool { - self.bits & curl_sys::CURL_POLL_OUT == curl_sys::CURL_POLL_OUT - } - - /// Wait for incoming and outgoing data. For the socket to become readable - /// or writable. - pub fn input_and_output(&self) -> bool { - self.bits & curl_sys::CURL_POLL_INOUT == curl_sys::CURL_POLL_INOUT - } - - /// The specified socket/file descriptor is no longer used by libcurl. - pub fn remove(&self) -> bool { - self.bits & curl_sys::CURL_POLL_REMOVE == curl_sys::CURL_POLL_REMOVE - } -} diff -Nru cargo-0.17.0/vendor/curl-0.4.1/src/panic.rs cargo-0.19.0/vendor/curl-0.4.1/src/panic.rs --- cargo-0.17.0/vendor/curl-0.4.1/src/panic.rs 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/curl-0.4.1/src/panic.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,30 +0,0 @@ -use std::any::Any; -use std::cell::RefCell; -use std::panic::{self, AssertUnwindSafe}; - -thread_local!(static LAST_ERROR: RefCell>> = { - RefCell::new(None) -}); - -pub fn catch T>(f: F) -> Option { - if LAST_ERROR.with(|slot| slot.borrow().is_some()) { - return None - } - - // Note that `AssertUnwindSafe` is used here as we prevent reentering - // arbitrary code due to the `LAST_ERROR` check above plus propagation of a - // panic after we return back to user code from C. 
- match panic::catch_unwind(AssertUnwindSafe(f)) { - Ok(ret) => Some(ret), - Err(e) => { - LAST_ERROR.with(|slot| *slot.borrow_mut() = Some(e)); - None - } - } -} - -pub fn propagate() { - if let Some(t) = LAST_ERROR.with(|slot| slot.borrow_mut().take()) { - panic::resume_unwind(t) - } -} diff -Nru cargo-0.17.0/vendor/curl-0.4.1/src/version.rs cargo-0.19.0/vendor/curl-0.4.1/src/version.rs --- cargo-0.17.0/vendor/curl-0.4.1/src/version.rs 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/curl-0.4.1/src/version.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,243 +0,0 @@ -use std::ffi::CStr; -use std::str; - -use curl_sys; -use libc::{c_int, c_char}; - -/// Version information about libcurl and the capabilities that it supports. -pub struct Version { - inner: *mut curl_sys::curl_version_info_data, -} - -unsafe impl Send for Version {} -unsafe impl Sync for Version {} - -/// An iterator over the list of protocols a version supports. -pub struct Protocols<'a> { - cur: *const *const c_char, - _inner: &'a Version, -} - -impl Version { - /// Returns the libcurl version that this library is currently linked against. - pub fn num() -> &'static str { - unsafe { - let s = CStr::from_ptr(curl_sys::curl_version() as *const _); - str::from_utf8(s.to_bytes()).unwrap() - } - } - - /// Returns the libcurl version that this library is currently linked against. - pub fn get() -> Version { - unsafe { - let ptr = curl_sys::curl_version_info(curl_sys::CURLVERSION_FOURTH); - assert!(!ptr.is_null()); - Version { inner: ptr } - } - } - - /// Returns the human readable version string, - pub fn version(&self) -> &str { - unsafe { - ::opt_str((*self.inner).version).unwrap() - } - } - - /// Returns a numeric representation of the version number - /// - /// This is a 24 bit number made up of the major number, minor, and then - /// patch number. For example 7.9.8 willr eturn 0x070908. 
- pub fn version_num(&self) -> u32 { - unsafe { - (*self.inner).version_num as u32 - } - } - - /// Returns a human readable string of the host libcurl is built for. - /// - /// This is discovered as part of the build environment. - pub fn host(&self) -> &str { - unsafe { - ::opt_str((*self.inner).host).unwrap() - } - } - - /// Returns whether libcurl supports IPv6 - pub fn feature_ipv6(&self) -> bool { - self.flag(curl_sys::CURL_VERSION_IPV6) - } - - /// Returns whether libcurl supports SSL - pub fn feature_ssl(&self) -> bool { - self.flag(curl_sys::CURL_VERSION_SSL) - } - - /// Returns whether libcurl supports HTTP deflate via libz - pub fn feature_libz(&self) -> bool { - self.flag(curl_sys::CURL_VERSION_LIBZ) - } - - /// Returns whether libcurl supports HTTP NTLM - pub fn feature_ntlm(&self) -> bool { - self.flag(curl_sys::CURL_VERSION_NTLM) - } - - /// Returns whether libcurl supports HTTP GSSNEGOTIATE - pub fn feature_gss_negotiate(&self) -> bool { - self.flag(curl_sys::CURL_VERSION_GSSNEGOTIATE) - } - - /// Returns whether libcurl was built with debug capabilities - pub fn feature_debug(&self) -> bool { - self.flag(curl_sys::CURL_VERSION_DEBUG) - } - - /// Returns whether libcurl was built with SPNEGO authentication - pub fn feature_spnego(&self) -> bool { - self.flag(curl_sys::CURL_VERSION_SPNEGO) - } - - /// Returns whether libcurl was built with large file support - pub fn feature_largefile(&self) -> bool { - self.flag(curl_sys::CURL_VERSION_LARGEFILE) - } - - /// Returns whether libcurl was built with support for IDNA, domain names - /// with international letters. - pub fn feature_idn(&self) -> bool { - self.flag(curl_sys::CURL_VERSION_IDN) - } - - /// Returns whether libcurl was built with support for SSPI. - pub fn feature_sspi(&self) -> bool { - self.flag(curl_sys::CURL_VERSION_SSPI) - } - - /// Returns whether libcurl was built with asynchronous name lookups. 
- pub fn feature_async_dns(&self) -> bool { - self.flag(curl_sys::CURL_VERSION_ASYNCHDNS) - } - - /// Returns whether libcurl was built with support for character - /// conversions. - pub fn feature_conv(&self) -> bool { - self.flag(curl_sys::CURL_VERSION_CONV) - } - - /// Returns whether libcurl was built with support for TLS-SRP. - pub fn feature_tlsauth_srp(&self) -> bool { - self.flag(curl_sys::CURL_VERSION_TLSAUTH_SRP) - } - - /// Returns whether libcurl was built with support for NTLM delegation to - /// winbind helper. - pub fn feature_ntlm_wb(&self) -> bool { - self.flag(curl_sys::CURL_VERSION_NTLM_WB) - } - - // /// Returns whether libcurl was built with support for HTTP2. - // pub fn feature_http2(&self) -> bool { - // self.flag(curl_sys::CURL_VERSION_HTTP2) - // } - - fn flag(&self, flag: c_int) -> bool { - unsafe { - (*self.inner).features & flag != 0 - } - } - - /// Returns the version of OpenSSL that is used, or None if there is no SSL - /// support. - pub fn ssl_version(&self) -> Option<&str> { - unsafe { - ::opt_str((*self.inner).ssl_version) - } - } - - /// Returns the version of libz that is used, or None if there is no libz - /// support. - pub fn libz_version(&self) -> Option<&str> { - unsafe { - ::opt_str((*self.inner).libz_version) - } - } - - /// Returns an iterator over the list of protocols that this build of - /// libcurl supports. - pub fn protocols(&self) -> Protocols { - unsafe { - Protocols { _inner: self, cur: (*self.inner).protocols } - } - } - - /// If available, the human readable version of ares that libcurl is linked - /// against. - pub fn ares_version(&self) -> Option<&str> { - unsafe { - if (*self.inner).age >= 1 { - ::opt_str((*self.inner).ares) - } else { - None - } - } - } - - /// If available, the version of ares that libcurl is linked against. 
- pub fn ares_version_num(&self) -> Option { - unsafe { - if (*self.inner).age >= 1 { - Some((*self.inner).ares_num as u32) - } else { - None - } - } - } - - /// If available, the version of libidn that libcurl is linked against. - pub fn libidn_version(&self) -> Option<&str> { - unsafe { - if (*self.inner).age >= 2 { - ::opt_str((*self.inner).libidn) - } else { - None - } - } - } - - /// If available, the version of iconv libcurl is linked against. - pub fn iconv_version_num(&self) -> Option { - unsafe { - if (*self.inner).age >= 3 { - Some((*self.inner).iconv_ver_num as u32) - } else { - None - } - } - } - - /// If available, the version of iconv libcurl is linked against. - pub fn libssh_version(&self) -> Option<&str> { - unsafe { - if (*self.inner).age >= 3 { - ::opt_str((*self.inner).libssh_version) - } else { - None - } - } - } -} - -impl<'a> Iterator for Protocols<'a> { - type Item = &'a str; - - fn next(&mut self) -> Option<&'a str> { - unsafe { - if (*self.cur).is_null() { - return None - } - let ret = ::opt_str(*self.cur).unwrap(); - self.cur = self.cur.offset(1); - Some(ret) - } - } -} diff -Nru cargo-0.17.0/vendor/curl-0.4.1/tests/easy.rs cargo-0.19.0/vendor/curl-0.4.1/tests/easy.rs --- cargo-0.17.0/vendor/curl-0.4.1/tests/easy.rs 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/curl-0.4.1/tests/easy.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,666 +0,0 @@ -extern crate curl; - -use std::cell::{RefCell, Cell}; -use std::io::Read; -use std::rc::Rc; -use std::str; -use std::time::Duration; - -macro_rules! 
t { - ($e:expr) => (match $e { - Ok(e) => e, - Err(e) => panic!("{} failed with {:?}", stringify!($e), e), - }) -} - -use curl::easy::{Easy, List, WriteError, ReadError, Transfer}; - -use server::Server; -mod server; - -fn handle() -> Easy { - let mut e = Easy::new(); - t!(e.timeout(Duration::new(20, 0))); - return e -} - -fn sink(data: &[u8]) -> Result { - Ok(data.len()) -} - -#[test] -fn get_smoke() { - let s = Server::new(); - s.receive("\ -GET / HTTP/1.1\r\n\ -Host: 127.0.0.1:$PORT\r\n\ -Accept: */*\r\n\ -\r\n"); - s.send("HTTP/1.1 200 OK\r\n\r\n"); - - let mut handle = handle(); - t!(handle.url(&s.url("/"))); - t!(handle.perform()); -} - -#[test] -fn get_path() { - let s = Server::new(); - s.receive("\ -GET /foo HTTP/1.1\r\n\ -Host: 127.0.0.1:$PORT\r\n\ -Accept: */*\r\n\ -\r\n"); - s.send("HTTP/1.1 200 OK\r\n\r\n"); - - let mut handle = handle(); - t!(handle.url(&s.url("/foo"))); - t!(handle.perform()); -} - -#[test] -fn write_callback() { - let s = Server::new(); - s.receive("\ -GET / HTTP/1.1\r\n\ -Host: 127.0.0.1:$PORT\r\n\ -Accept: */*\r\n\ -\r\n"); - s.send("HTTP/1.1 200 OK\r\n\r\nhello!"); - - let mut all = Vec::::new(); - { - let mut handle = handle(); - t!(handle.url(&s.url("/"))); - let mut handle = handle.transfer(); - t!(handle.write_function(|data| { - all.extend(data); - Ok(data.len()) - })); - t!(handle.perform()); - } - assert_eq!(all, b"hello!"); -} - -#[test] -fn progress() { - let s = Server::new(); - s.receive("\ -GET /foo HTTP/1.1\r\n\ -Host: 127.0.0.1:$PORT\r\n\ -Accept: */*\r\n\ -\r\n"); - s.send("HTTP/1.1 200 OK\r\n\r\nHello!"); - - let mut hits = 0; - let mut dl = 0.0; - { - let mut handle = handle(); - t!(handle.url(&s.url("/foo"))); - t!(handle.progress(true)); - t!(handle.write_function(sink)); - - let mut handle = handle.transfer(); - t!(handle.progress_function(|_, a, _, _| { - hits += 1; - dl = a; - true - })); - t!(handle.perform()); - } - assert!(hits > 0); - assert_eq!(dl, 6.0); -} - -#[test] -fn headers() { - let s = 
Server::new(); - s.receive("\ -GET / HTTP/1.1\r\n\ -Host: 127.0.0.1:$PORT\r\n\ -Accept: */*\r\n\ -\r\n"); - s.send("\ -HTTP/1.1 200 OK\r\n\ -Foo: bar\r\n\ -Bar: baz\r\n\ -\r\n -Hello!"); - - let mut headers = Vec::new(); - { - let mut handle = handle(); - t!(handle.url(&s.url("/"))); - - let mut handle = handle.transfer(); - t!(handle.header_function(|h| { - headers.push(str::from_utf8(h).unwrap().to_string()); - true - })); - t!(handle.write_function(sink)); - t!(handle.perform()); - } - assert_eq!(headers, vec![ - "HTTP/1.1 200 OK\r\n".to_string(), - "Foo: bar\r\n".to_string(), - "Bar: baz\r\n".to_string(), - "\r\n".to_string(), - ]); -} - -#[test] -fn fail_on_error() { - let s = Server::new(); - s.receive("\ -GET / HTTP/1.1\r\n\ -Host: 127.0.0.1:$PORT\r\n\ -Accept: */*\r\n\ -\r\n"); - s.send("\ -HTTP/1.1 401 Not so good\r\n\ -\r\n"); - - let mut h = handle(); - t!(h.url(&s.url("/"))); - t!(h.fail_on_error(true)); - assert!(h.perform().is_err()); - - let s = Server::new(); - s.receive("\ -GET / HTTP/1.1\r\n\ -Host: 127.0.0.1:$PORT\r\n\ -Accept: */*\r\n\ -\r\n"); - s.send("\ -HTTP/1.1 401 Not so good\r\n\ -\r\n"); - - let mut h = handle(); - t!(h.url(&s.url("/"))); - t!(h.fail_on_error(false)); - t!(h.perform()); -} - -#[test] -fn port() { - let s = Server::new(); - s.receive("\ -GET / HTTP/1.1\r\n\ -Host: localhost:$PORT\r\n\ -Accept: */*\r\n\ -\r\n"); - s.send("\ -HTTP/1.1 200 OK\r\n\ -\r\n"); - - let mut h = handle(); - t!(h.url("http://localhost/")); - t!(h.port(s.addr().port())); - t!(h.perform()); -} - -#[test] -fn proxy() { - let s = Server::new(); - s.receive("\ -GET http://example.com/ HTTP/1.1\r\n\ -Host: example.com\r\n\ -Accept: */*\r\n\ -\r\n"); - s.send("\ -HTTP/1.1 200 OK\r\n\ -\r\n"); - - let mut h = handle(); - t!(h.url("http://example.com/")); - t!(h.proxy(&s.url("/"))); - t!(h.perform()); -} - -#[test] -fn noproxy() { - let s = Server::new(); - s.receive("\ -GET / HTTP/1.1\r\n\ -Host: 127.0.0.1:$PORT\r\n\ -Accept: */*\r\n\ -\r\n"); - s.send("\ 
-HTTP/1.1 200 OK\r\n\ -\r\n"); - - let mut h = handle(); - t!(h.url(&s.url("/"))); - t!(h.proxy(&s.url("/"))); - t!(h.noproxy("127.0.0.1")); - t!(h.perform()); -} - -#[test] -fn misc() { - let mut h = handle(); - t!(h.tcp_nodelay(true)); - // t!(h.tcp_keepalive(true)); - // t!(h.tcp_keepidle(Duration::new(3, 0))); - // t!(h.tcp_keepintvl(Duration::new(3, 0))); - t!(h.buffer_size(10)); - t!(h.dns_cache_timeout(Duration::new(1, 0))); -} - -#[test] -fn userpass() { - let s = Server::new(); - s.receive("\ -GET / HTTP/1.1\r\n\ -Authorization: Basic YmFyOg==\r\n\ -Host: 127.0.0.1:$PORT\r\n\ -Accept: */*\r\n\ -\r\n"); - s.send("\ -HTTP/1.1 200 OK\r\n\ -\r\n"); - - let mut h = handle(); - t!(h.url(&s.url("/"))); - t!(h.username("foo")); - t!(h.username("bar")); - t!(h.perform()); -} - -#[test] -fn accept_encoding() { - let s = Server::new(); - s.receive("\ -GET / HTTP/1.1\r\n\ -Host: 127.0.0.1:$PORT\r\n\ -Accept: */*\r\n\ -Accept-Encoding: gzip\r\n\ -\r\n"); - s.send("\ -HTTP/1.1 200 OK\r\n\ -\r\n"); - - let mut h = handle(); - t!(h.url(&s.url("/"))); - t!(h.accept_encoding("gzip")); - t!(h.perform()); -} - -#[test] -fn follow_location() { - let s1 = Server::new(); - let s2 = Server::new(); - s1.receive("\ -GET / HTTP/1.1\r\n\ -Host: 127.0.0.1:$PORT\r\n\ -Accept: */*\r\n\ -\r\n"); - s1.send(&format!("\ -HTTP/1.1 301 Moved Permanently\r\n\ -Location: http://{}/foo\r\n\ -\r\n", s2.addr())); - - s2.receive("\ -GET /foo HTTP/1.1\r\n\ -Host: 127.0.0.1:$PORT\r\n\ -Accept: */*\r\n\ -\r\n"); - s2.send("\ -HTTP/1.1 200 OK\r\n\ -\r\n"); - - let mut h = handle(); - t!(h.url(&s1.url("/"))); - t!(h.follow_location(true)); - t!(h.perform()); -} - -#[test] -fn put() { - let s = Server::new(); - s.receive("\ -PUT / HTTP/1.1\r\n\ -Host: 127.0.0.1:$PORT\r\n\ -Accept: */*\r\n\ -Content-Length: 5\r\n\ -\r\n\ -data\n"); - s.send("\ -HTTP/1.1 200 OK\r\n\ -\r\n"); - - let mut data = "data\n".as_bytes(); - let mut list = List::new(); - t!(list.append("Expect:")); - let mut h = handle(); - 
t!(h.url(&s.url("/"))); - t!(h.put(true)); - t!(h.in_filesize(5)); - t!(h.upload(true)); - t!(h.http_headers(list)); - let mut h = h.transfer(); - t!(h.read_function(|buf| { - Ok(data.read(buf).unwrap()) - })); - t!(h.perform()); -} - -#[test] -fn post1() { - let s = Server::new(); - s.receive("\ -POST / HTTP/1.1\r\n\ -Host: 127.0.0.1:$PORT\r\n\ -Accept: */*\r\n\ -Content-Length: 5\r\n\ -Content-Type: application/x-www-form-urlencoded\r\n\ -\r\n\ -data\n"); - s.send("\ -HTTP/1.1 200 OK\r\n\ -\r\n"); - - let mut h = handle(); - t!(h.url(&s.url("/"))); - t!(h.post(true)); - t!(h.post_fields_copy(b"data\n")); - t!(h.perform()); -} - -#[test] -fn post2() { - let s = Server::new(); - s.receive("\ -POST / HTTP/1.1\r\n\ -Host: 127.0.0.1:$PORT\r\n\ -Accept: */*\r\n\ -Content-Length: 5\r\n\ -Content-Type: application/x-www-form-urlencoded\r\n\ -\r\n\ -data\n"); - s.send("\ -HTTP/1.1 200 OK\r\n\ -\r\n"); - - let mut h = handle(); - t!(h.url(&s.url("/"))); - t!(h.post(true)); - t!(h.post_fields_copy(b"data\n")); - t!(h.write_function(sink)); - t!(h.perform()); -} - -#[test] -fn post3() { - let s = Server::new(); - s.receive("\ -POST / HTTP/1.1\r\n\ -Host: 127.0.0.1:$PORT\r\n\ -Accept: */*\r\n\ -Content-Length: 5\r\n\ -Content-Type: application/x-www-form-urlencoded\r\n\ -\r\n\ -data\n"); - s.send("\ -HTTP/1.1 200 OK\r\n\ -\r\n"); - - let mut data = "data\n".as_bytes(); - let mut h = handle(); - t!(h.url(&s.url("/"))); - t!(h.post(true)); - t!(h.post_field_size(5)); - let mut h = h.transfer(); - t!(h.read_function(|buf| { - Ok(data.read(buf).unwrap()) - })); - t!(h.perform()); -} - -#[test] -fn referer() { - let s = Server::new(); - s.receive("\ -GET / HTTP/1.1\r\n\ -Host: 127.0.0.1:$PORT\r\n\ -Accept: */*\r\n\ -Referer: foo\r\n\ -\r\n"); - s.send("\ -HTTP/1.1 200 OK\r\n\ -\r\n"); - - let mut h = handle(); - t!(h.url(&s.url("/"))); - t!(h.referer("foo")); - t!(h.perform()); -} - -#[test] -fn useragent() { - let s = Server::new(); - s.receive("\ -GET / HTTP/1.1\r\n\ 
-User-Agent: foo\r\n\ -Host: 127.0.0.1:$PORT\r\n\ -Accept: */*\r\n\ -\r\n"); - s.send("\ -HTTP/1.1 200 OK\r\n\ -\r\n"); - - let mut h = handle(); - t!(h.url(&s.url("/"))); - t!(h.useragent("foo")); - t!(h.perform()); -} - -#[test] -fn custom_headers() { - let s = Server::new(); - s.receive("\ -GET / HTTP/1.1\r\n\ -Host: 127.0.0.1:$PORT\r\n\ -Foo: bar\r\n\ -\r\n"); - s.send("\ -HTTP/1.1 200 OK\r\n\ -\r\n"); - - let mut custom = List::new(); - t!(custom.append("Foo: bar")); - t!(custom.append("Accept:")); - let mut h = handle(); - t!(h.url(&s.url("/"))); - t!(h.http_headers(custom)); - t!(h.perform()); -} - -#[test] -fn cookie() { - let s = Server::new(); - s.receive("\ -GET / HTTP/1.1\r\n\ -Host: 127.0.0.1:$PORT\r\n\ -Accept: */*\r\n\ -Cookie: foo\r\n\ -\r\n"); - s.send("\ -HTTP/1.1 200 OK\r\n\ -\r\n"); - - let mut h = handle(); - t!(h.url(&s.url("/"))); - t!(h.cookie("foo")); - t!(h.perform()); -} - -#[test] -fn url_encoding() { - let mut h = handle(); - assert_eq!(h.url_encode(b"foo"), "foo"); - assert_eq!(h.url_encode(b"foo bar"), "foo%20bar"); - assert_eq!(h.url_encode(b"foo bar\xff"), "foo%20bar%FF"); - assert_eq!(h.url_encode(b""), ""); - assert_eq!(h.url_decode("foo"), b"foo"); - assert_eq!(h.url_decode("foo%20bar"), b"foo bar"); - assert_eq!(h.url_decode("foo%2"), b"foo%2"); - assert_eq!(h.url_decode("foo%xx"), b"foo%xx"); - assert_eq!(h.url_decode("foo%ff"), b"foo\xff"); - assert_eq!(h.url_decode(""), b""); -} - -#[test] -fn getters() { - let s = Server::new(); - s.receive("\ -GET / HTTP/1.1\r\n\ -Host: 127.0.0.1:$PORT\r\n\ -Accept: */*\r\n\ -\r\n"); - s.send("\ -HTTP/1.1 200 OK\r\n\ -\r\n"); - - let mut h = handle(); - t!(h.url(&s.url("/"))); - t!(h.cookie_file("/dev/null")); - t!(h.perform()); - assert_eq!(t!(h.response_code()), 200); - assert_eq!(t!(h.redirect_count()), 0); - assert_eq!(t!(h.redirect_url()), None); - assert_eq!(t!(h.content_type()), None); - - let addr = format!("http://{}/", s.addr()); - assert_eq!(t!(h.effective_url()), 
Some(&addr[..])); - - // TODO: test this - // let cookies = t!(h.cookies()).iter() - // .map(|s| s.to_vec()) - // .collect::>(); - // assert_eq!(cookies.len(), 1); -} - -#[test] -#[should_panic] -fn panic_in_callback() { - let s = Server::new(); - s.receive("\ -GET / HTTP/1.1\r\n\ -Host: 127.0.0.1:$PORT\r\n\ -Accept: */*\r\n\ -\r\n"); - s.send("\ -HTTP/1.1 200 OK\r\n\ -\r\n"); - - let mut h = handle(); - t!(h.url(&s.url("/"))); - t!(h.header_function(|_| panic!())); - t!(h.perform()); -} - -#[test] -fn abort_read() { - let s = Server::new(); - s.receive("\ -PUT / HTTP/1.1\r\n\ -Host: 127.0.0.1:$PORT\r\n\ -Accept: */*\r\n\ -Content-Length: 2\r\n\ -\r\n"); - s.send("\ -HTTP/1.1 200 OK\r\n\ -\r\n"); - - let mut h = handle(); - t!(h.url(&s.url("/"))); - t!(h.read_function(|_| Err(ReadError::Abort))); - t!(h.put(true)); - t!(h.in_filesize(2)); - let mut list = List::new(); - t!(list.append("Expect:")); - t!(h.http_headers(list)); - let err = h.perform().unwrap_err(); - assert!(err.is_aborted_by_callback()); -} - -#[test] -fn pause_write_then_resume() { - let s = Server::new(); - s.receive("\ -GET / HTTP/1.1\r\n\ -Host: 127.0.0.1:$PORT\r\n\ -Accept: */*\r\n\ -\r\n"); - s.send("\ -HTTP/1.1 200 OK\r\n\ -\r\n -a\n -b"); - - let mut h = handle(); - t!(h.url(&s.url("/"))); - t!(h.progress(true)); - - struct State<'a, 'b> { - paused: Cell, - unpaused: Cell, - transfer: RefCell>, - } - - let h = Rc::new(State { - paused: Cell::new(false), - unpaused: Cell::new(false), - transfer: RefCell::new(h.transfer()), - }); - - let h2 = h.clone(); - t!(h.transfer.borrow_mut().write_function(move |data| { - if h2.unpaused.get() { - h2.unpaused.set(false); - Ok(data.len()) - } else { - h2.paused.set(true); - Err(WriteError::Pause) - } - })); - let h2 = h.clone(); - t!(h.transfer.borrow_mut().progress_function(move |_, _, _, _| { - if h2.paused.get() { - h2.paused.set(false); - h2.unpaused.set(true); - t!(h2.transfer.borrow().unpause_write()); - } - true - })); - 
t!(h.transfer.borrow().perform()); -} - -#[test] -fn perform_in_perform_is_bad() { - let s = Server::new(); - s.receive("\ -GET / HTTP/1.1\r\n\ -Host: 127.0.0.1:$PORT\r\n\ -Accept: */*\r\n\ -\r\n"); - s.send("\ -HTTP/1.1 200 OK\r\n\ -\r\n -a\n -b"); - - let mut h = handle(); - t!(h.url(&s.url("/"))); - t!(h.progress(true)); - - let h = Rc::new(RefCell::new(h.transfer())); - - let h2 = h.clone(); - t!(h.borrow_mut().write_function(move |data| { - assert!(h2.borrow().perform().is_err()); - Ok(data.len()) - })); - t!(h.borrow().perform()); -} diff -Nru cargo-0.17.0/vendor/curl-0.4.1/tests/formdata cargo-0.19.0/vendor/curl-0.4.1/tests/formdata --- cargo-0.17.0/vendor/curl-0.4.1/tests/formdata 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/curl-0.4.1/tests/formdata 1970-01-01 00:00:00.000000000 +0000 @@ -1 +0,0 @@ -hello diff -Nru cargo-0.17.0/vendor/curl-0.4.1/tests/multi.rs cargo-0.19.0/vendor/curl-0.4.1/tests/multi.rs --- cargo-0.17.0/vendor/curl-0.4.1/tests/multi.rs 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/curl-0.4.1/tests/multi.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,223 +0,0 @@ -#![cfg(unix)] - -extern crate curl; -extern crate mio; - -use std::collections::HashMap; -use std::io::{Read, Cursor}; -use std::time::Duration; - -use curl::easy::{Easy, List}; -use curl::multi::Multi; - -macro_rules! t { - ($e:expr) => (match $e { - Ok(e) => e, - Err(e) => panic!("{} failed with {:?}", stringify!($e), e), - }) -} - -use server::Server; -mod server; - -#[test] -fn smoke() { - let m = Multi::new(); - let mut e = Easy::new(); - - let s = Server::new(); - s.receive("\ -GET / HTTP/1.1\r\n\ -Host: 127.0.0.1:$PORT\r\n\ -Accept: */*\r\n\ -\r\n"); - s.send("HTTP/1.1 200 OK\r\n\r\n"); - - t!(e.url(&s.url("/"))); - let _e = t!(m.add(e)); - while t!(m.perform()) > 0 { - // ... 
- } -} - -#[test] -fn smoke2() { - let m = Multi::new(); - - let s1 = Server::new(); - s1.receive("\ -GET / HTTP/1.1\r\n\ -Host: 127.0.0.1:$PORT\r\n\ -Accept: */*\r\n\ -\r\n"); - s1.send("HTTP/1.1 200 OK\r\n\r\n"); - - let s2 = Server::new(); - s2.receive("\ -GET / HTTP/1.1\r\n\ -Host: 127.0.0.1:$PORT\r\n\ -Accept: */*\r\n\ -\r\n"); - s2.send("HTTP/1.1 200 OK\r\n\r\n"); - - let mut e1 = Easy::new(); - t!(e1.url(&s1.url("/"))); - let _e1 = t!(m.add(e1)); - let mut e2 = Easy::new(); - t!(e2.url(&s2.url("/"))); - let _e2 = t!(m.add(e2)); - - while t!(m.perform()) > 0 { - // ... - } - - let mut done = 0; - m.messages(|msg| { - msg.result().unwrap().unwrap(); - done += 1; - }); - assert_eq!(done, 2); -} - -#[test] -fn upload_lots() { - use curl::multi::{Socket, SocketEvents, Events}; - - #[derive(Debug)] - enum Message { - Timeout(Option), - Wait(Socket, SocketEvents, usize), - } - - let mut m = Multi::new(); - let poll = t!(mio::Poll::new()); - let (tx, rx) = mio::channel::channel(); - let tx2 = tx.clone(); - t!(m.socket_function(move |socket, events, token| { - t!(tx2.send(Message::Wait(socket, events, token))); - })); - t!(m.timer_function(move |dur| { - t!(tx.send(Message::Timeout(dur))); - true - })); - - let s = Server::new(); - s.receive(&format!("\ -PUT / HTTP/1.1\r\n\ -Host: 127.0.0.1:$PORT\r\n\ -Accept: */*\r\n\ -Content-Length: 131072\r\n\ -\r\n\ -{}\n", vec!["a"; 128 * 1024 - 1].join(""))); - s.send("\ -HTTP/1.1 200 OK\r\n\ -\r\n"); - - let mut data = vec![b'a'; 128 * 1024 - 1]; - data.push(b'\n'); - let mut data = Cursor::new(data); - let mut list = List::new(); - t!(list.append("Expect:")); - let mut h = Easy::new(); - t!(h.url(&s.url("/"))); - t!(h.put(true)); - t!(h.read_function(move |buf| { - Ok(data.read(buf).unwrap()) - })); - t!(h.in_filesize(128 * 1024)); - t!(h.upload(true)); - t!(h.http_headers(list)); - - t!(poll.register(&rx, - mio::Token(0), - mio::Ready::all(), - mio::PollOpt::level())); - - let e = t!(m.add(h)); - - assert!(t!(m.perform()) > 
0); - let mut next_token = 1; - let mut token_map = HashMap::new(); - let mut cur_timeout = None; - let mut events = mio::Events::with_capacity(128); - let mut running = true; - - while running { - let n = t!(poll.poll(&mut events, cur_timeout)); - - if n == 0 { - if t!(m.timeout()) == 0 { - running = false; - } - } - - for event in events.iter() { - while event.token() == mio::Token(0) { - match rx.try_recv() { - Ok(Message::Timeout(dur)) => cur_timeout = dur, - Ok(Message::Wait(socket, events, token)) => { - let evented = mio::unix::EventedFd(&socket); - if events.remove() { - token_map.remove(&token).unwrap(); - } else { - let mut e = mio::Ready::none(); - if events.input() { - e = e | mio::Ready::readable(); - } - if events.output() { - e = e | mio::Ready::writable(); - } - if token == 0 { - let token = next_token; - next_token += 1; - t!(m.assign(socket, token)); - token_map.insert(token, socket); - t!(poll.register(&evented, - mio::Token(token), - e, - mio::PollOpt::level())); - } else { - t!(poll.reregister(&evented, - mio::Token(token), - e, - mio::PollOpt::level())); - } - } - } - Err(_) => break, - } - } - - if event.token() == mio::Token(0) { - continue - } - - let token = event.token(); - let socket = token_map[&token.into()]; - let mut e = Events::new(); - if event.kind().is_readable() { - e.input(true); - } - if event.kind().is_writable() { - e.output(true); - } - if event.kind().is_error() { - e.error(true); - } - let remaining = t!(m.action(socket, &e)); - if remaining == 0 { - running = false; - } - } - } - - let mut done = 0; - m.messages(|m| { - m.result().unwrap().unwrap(); - done += 1; - }); - assert_eq!(done, 1); - - let mut e = t!(m.remove(e)); - assert_eq!(t!(e.response_code()), 200); -} diff -Nru cargo-0.17.0/vendor/curl-0.4.1/tests/post.rs cargo-0.19.0/vendor/curl-0.4.1/tests/post.rs --- cargo-0.17.0/vendor/curl-0.4.1/tests/post.rs 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/curl-0.4.1/tests/post.rs 1970-01-01 
00:00:00.000000000 +0000 @@ -1,108 +0,0 @@ -extern crate curl; - -use std::str; -use std::time::Duration; - -macro_rules! t { - ($e:expr) => (match $e { - Ok(e) => e, - Err(e) => panic!("{} failed with {:?}", stringify!($e), e), - }) -} - -use curl::easy::{Easy, Form}; - -use server::Server; -mod server; - -fn handle() -> Easy { - let mut e = Easy::new(); - t!(e.timeout(Duration::new(20, 0))); - return e -} - -#[test] -fn custom() { - let s = Server::new(); - s.receive("\ -POST / HTTP/1.1\r\n\ -Host: 127.0.0.1:$PORT\r\n\ -Accept: */*\r\n\ -Content-Length: 142\r\n\ -Expect: 100-continue\r\n\ -Content-Type: multipart/form-data; boundary=--[..]\r\n\ -\r\n\ ---[..]\r\n\ -Content-Disposition: form-data; name=\"foo\"\r\n\ -\r\n\ -1234\r\n\ ---[..]\r\n"); - s.send("HTTP/1.1 200 OK\r\n\r\n"); - - let mut handle = handle(); - let mut form = Form::new(); - t!(form.part("foo").contents(b"1234").add()); - t!(handle.url(&s.url("/"))); - t!(handle.httppost(form)); - t!(handle.perform()); -} - -#[test] -fn buffer() { - let s = Server::new(); - s.receive("\ -POST / HTTP/1.1\r\n\ -Host: 127.0.0.1:$PORT\r\n\ -Accept: */*\r\n\ -Content-Length: 181\r\n\ -Expect: 100-continue\r\n\ -Content-Type: multipart/form-data; boundary=--[..]\r\n\ -\r\n\ ---[..]\r\n\ -Content-Disposition: form-data; name=\"foo\"; filename=\"bar\"\r\n\ -Content-Type: foo/bar\r\n\ -\r\n\ -1234\r\n\ ---[..]\r\n"); - s.send("HTTP/1.1 200 OK\r\n\r\n"); - - let mut handle = handle(); - let mut form = Form::new(); - t!(form.part("foo") - .buffer("bar", b"1234".to_vec()) - .content_type("foo/bar") - .add()); - t!(handle.url(&s.url("/"))); - t!(handle.httppost(form)); - t!(handle.perform()); -} - -#[test] -fn file() { - let s = Server::new(); - s.receive("\ -POST / HTTP/1.1\r\n\ -Host: 127.0.0.1:$PORT\r\n\ -Accept: */*\r\n\ -Content-Length: 205\r\n\ -Expect: 100-continue\r\n\ -Content-Type: multipart/form-data; boundary=--[..]\r\n\ -\r\n\ ---[..]\r\n\ -Content-Disposition: form-data; name=\"foo\"; 
filename=\"formdata\"\r\n\ -Content-Type: application/octet-stream\r\n\ -\r\n\ -hello\n\ -\r\n\ ---[..]\r\n"); - s.send("HTTP/1.1 200 OK\r\n\r\n"); - - let mut handle = handle(); - let mut form = Form::new(); - t!(form.part("foo") - .file("tests/formdata") - .add()); - t!(handle.url(&s.url("/"))); - t!(handle.httppost(form)); - t!(handle.perform()); -} diff -Nru cargo-0.17.0/vendor/curl-0.4.1/tests/server/mod.rs cargo-0.19.0/vendor/curl-0.4.1/tests/server/mod.rs --- cargo-0.17.0/vendor/curl-0.4.1/tests/server/mod.rs 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/curl-0.4.1/tests/server/mod.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,175 +0,0 @@ -#![allow(dead_code)] - -use std::collections::HashSet; -use std::net::{TcpListener, SocketAddr, TcpStream}; -use std::io::prelude::*; -use std::thread; -use std::sync::mpsc::{Sender, Receiver, channel}; -use std::io::BufReader; - -pub struct Server { - messages: Option>, - addr: SocketAddr, - thread: Option>, -} - -enum Message { - Read(String), - Write(String), -} - -fn run(listener: &TcpListener, rx: &Receiver) { - let mut socket = BufReader::new(listener.accept().unwrap().0); - for msg in rx.iter() { - match msg { - Message::Read(ref expected) => { - let mut expected = &expected[..]; - let mut expected_headers = HashSet::new(); - while let Some(i) = expected.find("\n") { - let line = &expected[..i + 1]; - expected = &expected[i + 1..]; - expected_headers.insert(line); - if line == "\r\n" { - break - } - } - - let mut expected_len = None; - while expected_headers.len() > 0 { - let mut actual = String::new(); - t!(socket.read_line(&mut actual)); - if actual.starts_with("Content-Length") { - let len = actual.split(": ").skip(1).next().unwrap(); - expected_len = len.trim().parse().ok(); - } - // various versions of libcurl do different things here - if actual == "Proxy-Connection: Keep-Alive\r\n" { - continue - } - if expected_headers.remove(&actual[..]) { - continue - } - - let mut found = None; - for header 
in expected_headers.iter() { - if lines_match(header, &actual) { - found = Some(header.clone()); - break - } - } - if let Some(found) = found { - expected_headers.remove(&found); - continue - } - panic!("unexpected header: {:?} (remaining headers {:?})", - actual, expected_headers); - } - for header in expected_headers { - panic!("expected header but not found: {:?}", header); - } - - let mut line = String::new(); - let mut socket = match expected_len { - Some(amt) => socket.by_ref().take(amt), - None => socket.by_ref().take(expected.len() as u64), - }; - while socket.limit() > 0 { - line.truncate(0); - t!(socket.read_line(&mut line)); - if line.len() == 0 { - break - } - if expected.len() == 0 { - panic!("unexpected line: {:?}", line); - } - let i = expected.find("\n").unwrap_or(expected.len() - 1); - let expected_line = &expected[..i + 1]; - expected = &expected[i + 1..]; - if lines_match(expected_line, &line) { - continue - } - panic!("lines didn't match:\n\ - expected: {:?}\n\ - actual: {:?}\n", expected_line, line) - } - if expected.len() != 0 { - println!("didn't get expected data: {:?}", expected); - } - } - Message::Write(ref to_write) => { - t!(socket.get_mut().write_all(to_write.as_bytes())); - return - } - } - } - - let mut dst = Vec::new(); - t!(socket.read_to_end(&mut dst)); - assert!(dst.len() == 0); -} - -fn lines_match(expected: &str, mut actual: &str) -> bool { - for (i, part) in expected.split("[..]").enumerate() { - match actual.find(part) { - Some(j) => { - if i == 0 && j != 0 { - return false - } - actual = &actual[j + part.len()..]; - } - None => { - return false - } - } - } - actual.is_empty() || expected.ends_with("[..]") -} - -impl Server { - pub fn new() -> Server { - let listener = t!(TcpListener::bind("127.0.0.1:0")); - let addr = t!(listener.local_addr()); - let (tx, rx) = channel(); - let thread = thread::spawn(move || run(&listener, &rx)); - Server { - messages: Some(tx), - addr: addr, - thread: Some(thread), - } - } - - pub fn 
receive(&self, msg: &str) { - let msg = msg.replace("$PORT", &self.addr.port().to_string()); - self.msg(Message::Read(msg)); - } - - pub fn send(&self, msg: &str) { - let msg = msg.replace("$PORT", &self.addr.port().to_string()); - self.msg(Message::Write(msg)); - } - - fn msg(&self, msg: Message) { - t!(self.messages.as_ref().unwrap().send(msg)); - } - - pub fn addr(&self) -> &SocketAddr { - &self.addr - } - - pub fn url(&self, path: &str) -> String { - format!("http://{}{}", self.addr, path) - } -} - -impl Drop for Server { - fn drop(&mut self) { - drop(TcpStream::connect(&self.addr)); - drop(self.messages.take()); - let res = self.thread.take().unwrap().join(); - if !thread::panicking() { - t!(res); - } else if let Err(e) = res { - println!("child server thread also failed: {:?}", e); - } - } -} diff -Nru cargo-0.17.0/vendor/curl-0.4.1/.travis.yml cargo-0.19.0/vendor/curl-0.4.1/.travis.yml --- cargo-0.17.0/vendor/curl-0.4.1/.travis.yml 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/curl-0.4.1/.travis.yml 1970-01-01 00:00:00.000000000 +0000 @@ -1,69 +0,0 @@ -language: rust -sudo: required -dist: trusty -services: - - docker - -matrix: - include: - - os: linux - rust: stable - env: TARGET=x86_64-unknown-linux-gnu DOCKER=linux64 - - os: linux - rust: stable - env: TARGET=i686-unknown-linux-gnu DOCKER=linux32 - - os: linux - rust: stable - env: TARGET=x86_64-unknown-linux-musl DOCKER=musl - - os: linux - rust: stable - env: TARGET=x86_64-pc-windows-gnu NO_RUN=1 DOCKER=mingw - - os: linux - rust: stable - env: TARGET=x86_64-unknown-linux-gnu DOCKER=linux64-curl - - os: osx - rust: stable - env: TARGET=x86_64-apple-darwin - - os: osx - rust: stable - env: TARGET=i686-apple-darwin - - os: linux - rust: beta - env: TARGET=x86_64-unknown-linux-gnu DOCKER=linux64 - - os: linux - rust: nightly - env: TARGET=x86_64-unknown-linux-gnu DOCKER=linux64 -sudo: false -before_script: - - pip install 'travis-cargo<0.2' --user && export PATH=$HOME/.local/bin:$PATH - - 
curl https://static.rust-lang.org/rustup.sh | - sh -s -- --add-target=$TARGET --disable-sudo -y --prefix=`rustc --print sysroot` -script: - - curl --version - - cargo generate-lockfile - - cargo generate-lockfile --manifest-path systest/Cargo.toml - - if [ -z "$DOCKER" ]; then - sh ci/run.sh; - else - mkdir .cargo target; - docker build -t rust -f ci/Dockerfile-$DOCKER ci; - docker run - -w /src - -v `pwd`:/src:ro - -v `pwd`/target:/src/target - -v `pwd`/ci/.cargo:/src/.cargo:ro - -v `rustc --print sysroot`:/usr/local:ro - -e TARGET=$TARGET - -e NO_RUN=$NO_RUN - -e CARGO_TARGET_DIR=/src/target - -it rust - sh ci/run.sh; - fi -after_success: - - travis-cargo --only nightly doc-upload -notifications: - email: - on_success: never -env: - global: - secure: LL8jOjJgR3txi+YmGqZTiCLqKYCZdHSRrropilczZwm5goTr75BYXStS4AMCsioENPskeazWRBomQZ003Or0nNliFV5VfnG4DK4Z0qCkEe5CpHEda2erF369Wq4hRGPKatbUj7aN36GokmZtaC6XZF/DmYxCE4zTSoMg8KI3Meo= diff -Nru cargo-0.17.0/vendor/curl-0.4.6/appveyor.yml cargo-0.19.0/vendor/curl-0.4.6/appveyor.yml --- cargo-0.17.0/vendor/curl-0.4.6/appveyor.yml 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/curl-0.4.6/appveyor.yml 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,55 @@ +environment: + matrix: + + # Ensure MinGW works, but we need to download the 32-bit MinGW compiler from a + # custom location. 
+ - TARGET: i686-pc-windows-gnu + MINGW_URL: https://s3.amazonaws.com/rust-lang-ci + MINGW_ARCHIVE: i686-4.9.2-release-win32-dwarf-rt_v4-rev4.7z + MINGW_DIR: mingw32 + - TARGET: x86_64-pc-windows-gnu + MSYS_BITS: 64 + + # Ensure vanilla builds work + - TARGET: i686-pc-windows-msvc + - TARGET: x86_64-pc-windows-msvc + + # Pin to specific VS versions to ensure the build works + - TARGET: x86_64-pc-windows-msvc + ARCH: amd64 + VS: C:\Program Files (x86)\Microsoft Visual Studio 12.0\VC\vcvarsall.bat + - TARGET: x86_64-pc-windows-msvc + ARCH: amd64 + VS: C:\Program Files (x86)\Microsoft Visual Studio 14.0\VC\vcvarsall.bat + +install: + # Install rust, x86_64-pc-windows-msvc host + - curl -sSf -o rustup-init.exe https://win.rustup.rs/ + - rustup-init.exe -y --default-host x86_64-pc-windows-msvc + - set PATH=%PATH%;C:\Users\appveyor\.cargo\bin + + # Install the target we're compiling for + - if NOT "%TARGET%" == "x86_64-pc-windows-msvc" rustup target add %TARGET% + + # Use the system msys if we can + - if defined MSYS_BITS set PATH=C:\msys64\mingw%MSYS_BITS%\bin;C:\msys64\usr\bin;%PATH% + + # download a custom compiler otherwise + - if defined MINGW_URL appveyor DownloadFile %MINGW_URL%/%MINGW_ARCHIVE% + - if defined MINGW_URL 7z x -y %MINGW_ARCHIVE% > nul + - if defined MINGW_URL set PATH=C:\Python27;%CD%\%MINGW_DIR%\bin;C:\msys64\usr\bin;%PATH% + + # If we're pinning to a specific visual studio, do so now + - if defined VS call "%VS%" %ARCH% + + # let's see what we got + - where gcc rustc cargo + - rustc -vV + - cargo -vV + - set CARGO_TARGET_DIR=%CD%\target + +build: false + +test_script: + - cargo test --target %TARGET% + - cargo run --manifest-path systest/Cargo.toml --target %TARGET% diff -Nru cargo-0.17.0/vendor/curl-0.4.6/.cargo-checksum.json cargo-0.19.0/vendor/curl-0.4.6/.cargo-checksum.json --- cargo-0.17.0/vendor/curl-0.4.6/.cargo-checksum.json 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/curl-0.4.6/.cargo-checksum.json 2017-08-16 
09:07:18.000000000 +0000 @@ -0,0 +1 @@ +{"files":{".cargo-ok":"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",".gitignore":"aea7ee9b968d1bcb76f04a6a089eceb631b3af6c3d72c001e3ee5fae8052d8d6",".gitmodules":"360dc395ed93285743c292e50bc35cd0cde7882a44f663489e5fe09df2fd0322",".travis.yml":"e6b360b65238bfa081f1399f670768d64f7cbef00d7195b1788ff53a5152f5fd","Cargo.toml":"ed3b0f7036152f8b965c560d4dbbf4206dea47c012b14fdd19fc69a465b8a4ee","LICENSE":"f96def8cba2793fb8582fd12ca6d4dc0ef4ee239e8c3f80e809ec43648da6199","README.md":"66fe8d376d5478e6f7ff3c4d3f8fae69d10493664e776ded83a1c2a14118268c","appveyor.yml":"714bf2b74f6cad8106d490b34785d0e6e8043fa40b5b62dbd5910eb8e68437ab","ci/.cargo/config":"0fc30f27f20cc08c09e3b3107a7b79b0beac992fe66969a744b15900ed5322ab","ci/Dockerfile-linux32":"8f4c3531a36154e2bebf045aed0b9a38bd18ce7142ca7c56ebd2fe551747a5d1","ci/Dockerfile-linux64":"c1e06c4534cb039883950af4442aeb4bc7ca2b112969e3a5b2d7f4ca4728c919","ci/Dockerfile-linux64-curl":"b08eae75dd40b430230d8c225cde0aba88d64192f32d841d7c98648201620537","ci/Dockerfile-mingw":"7fb3093158bbea457b5a6827f337be42b541ea97de851a80e4732b5346b76f05","ci/Dockerfile-musl":"03afb1b28326566a1ee70a66ad0dc5216e6601e642bfbdf120b6403f42786d2a","ci/run.sh":"ad149baac88133d85e211c7e41a79978bc4030d0f22f16d831eef786dbaae3dd","src/easy.rs":"772816c7fd7b1d7667736e3ec99be1ea98295ddf3ea5873fcb94fd41ce65b632","src/error.rs":"06d9ee3cfcf382b098635200b381c742ba80d8fd1c6dfde35ad26e2126f8c8f6","src/lib.rs":"2ec7a9cf3bd188764cadf5e9beff2f433aba3021a9bff745f9349efb609e1d15","src/multi.rs":"7b907e87ae4f3b9c6e8c4a9031327b3b3d0d4a633db9e3ea320e77421c480b1a","src/panic.rs":"4373b2bedb4403fd0cf2059f925a8fdb4a3f4399e83bab817ecd8f7485004f9c","src/version.rs":"cfda858806a50a36c5e58e433a14202401c16941e8677173d964252405c6adce","tests/easy.rs":"54711c01b2fa7a39ca9d9e93047ee08c1396d84ca0f38e76be51a2e03637efd8","tests/formdata":"5891b5b522d5df086d0ff0b110fbd9d21bb4fc7163af34d08286a2e846f6be03","tests/multi.rs":"a978f23578ca9d9
0aae87ba0d72399757dc9745a07564d1aa2bb8c7f1f9dffeb","tests/post.rs":"c4a69b0dc0d5cb11988cd7e93448d45df98b86d90e3d6c7e8c30b6bebac3fd78","tests/server/mod.rs":"4d1d744586caf09a6ac43fdf4c6012c18708e7c788ccf42cbf493b1894a8d8a2"},"package":"c90e1240ef340dd4027ade439e5c7c2064dd9dc652682117bd50d1486a3add7b"} \ No newline at end of file diff -Nru cargo-0.17.0/vendor/curl-0.4.6/Cargo.toml cargo-0.19.0/vendor/curl-0.4.6/Cargo.toml --- cargo-0.17.0/vendor/curl-0.4.6/Cargo.toml 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/curl-0.4.6/Cargo.toml 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,34 @@ +[package] + +name = "curl" +version = "0.4.6" +authors = ["Carl Lerche ", + "Alex Crichton "] +license = "MIT" +repository = "https://github.com/alexcrichton/curl-rust" +homepage = "https://github.com/alexcrichton/curl-rust" +documentation = "https://docs.rs/curl" +description = "Rust bindings to libcurl for making HTTP requests" +categories = ["api-bindings", "web-programming::http-client"] + +[badges] +travis-ci = { repository = "alexcrichton/curl-rust" } +appveyor = { repository = "alexcrichton/curl-rust" } + +[dependencies] +libc = "0.2" +curl-sys = { path = "curl-sys", version = "0.3.10" } + +# Unix platforms use OpenSSL for now to provide SSL functionality +[target."cfg(all(unix, not(target_os = \"macos\")))".dependencies] +openssl-sys = "0.9.0" +openssl-probe = "0.1" + +[target."cfg(windows)".dependencies] +winapi = "0.2" + +[dev-dependencies] +mio = "0.6" + +[workspace] +members = ["systest"] diff -Nru cargo-0.17.0/vendor/curl-0.4.6/ci/.cargo/config cargo-0.19.0/vendor/curl-0.4.6/ci/.cargo/config --- cargo-0.17.0/vendor/curl-0.4.6/ci/.cargo/config 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/curl-0.4.6/ci/.cargo/config 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,2 @@ +[target.x86_64-pc-windows-gnu] +linker = "x86_64-w64-mingw32-gcc" diff -Nru cargo-0.17.0/vendor/curl-0.4.6/ci/Dockerfile-linux32 
cargo-0.19.0/vendor/curl-0.4.6/ci/Dockerfile-linux32 --- cargo-0.17.0/vendor/curl-0.4.6/ci/Dockerfile-linux32 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/curl-0.4.6/ci/Dockerfile-linux32 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,10 @@ +FROM ubuntu:16.04 + +RUN dpkg --add-architecture i386 +RUN apt-get update +RUN apt-get install -y --no-install-recommends \ + gcc-multilib ca-certificates make libc6-dev \ + libssl-dev:i386 pkg-config + +ENV PKG_CONFIG=i686-linux-gnu-pkg-config \ + PKG_CONFIG_ALLOW_CROSS=1 diff -Nru cargo-0.17.0/vendor/curl-0.4.6/ci/Dockerfile-linux64 cargo-0.19.0/vendor/curl-0.4.6/ci/Dockerfile-linux64 --- cargo-0.17.0/vendor/curl-0.4.6/ci/Dockerfile-linux64 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/curl-0.4.6/ci/Dockerfile-linux64 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,9 @@ +FROM ubuntu:16.04 + +RUN apt-get update +RUN apt-get install -y --no-install-recommends \ + gcc ca-certificates make libc6-dev \ + libssl-dev \ + pkg-config + +ENV FEATURES="http2" \ No newline at end of file diff -Nru cargo-0.17.0/vendor/curl-0.4.6/ci/Dockerfile-linux64-curl cargo-0.19.0/vendor/curl-0.4.6/ci/Dockerfile-linux64-curl --- cargo-0.17.0/vendor/curl-0.4.6/ci/Dockerfile-linux64-curl 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/curl-0.4.6/ci/Dockerfile-linux64-curl 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,6 @@ +FROM ubuntu:14.04 + +RUN apt-get update +RUN apt-get install -y --no-install-recommends \ + gcc ca-certificates make libc6-dev \ + libssl-dev libcurl4-openssl-dev pkg-config diff -Nru cargo-0.17.0/vendor/curl-0.4.6/ci/Dockerfile-mingw cargo-0.19.0/vendor/curl-0.4.6/ci/Dockerfile-mingw --- cargo-0.17.0/vendor/curl-0.4.6/ci/Dockerfile-mingw 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/curl-0.4.6/ci/Dockerfile-mingw 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,6 @@ +FROM ubuntu:16.04 + +RUN apt-get update +RUN apt-get install -y --no-install-recommends \ + gcc ca-certificates make 
libc6-dev \ + gcc-mingw-w64-x86-64 libz-mingw-w64-dev diff -Nru cargo-0.17.0/vendor/curl-0.4.6/ci/Dockerfile-musl cargo-0.19.0/vendor/curl-0.4.6/ci/Dockerfile-musl --- cargo-0.17.0/vendor/curl-0.4.6/ci/Dockerfile-musl 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/curl-0.4.6/ci/Dockerfile-musl 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,18 @@ +FROM ubuntu:16.04 + +RUN apt-get update +RUN apt-get install -y --no-install-recommends \ + gcc ca-certificates make libc6-dev curl \ + musl-tools + +RUN \ + curl https://www.openssl.org/source/old/1.0.2/openssl-1.0.2g.tar.gz | tar xzf - && \ + cd openssl-1.0.2g && \ + CC=musl-gcc ./Configure --prefix=/openssl no-dso linux-x86_64 -fPIC && \ + make -j10 && \ + make install && \ + cd .. && \ + rm -rf openssl-1.0.2g + +ENV OPENSSL_STATIC=1 \ + OPENSSL_DIR=/openssl diff -Nru cargo-0.17.0/vendor/curl-0.4.6/ci/run.sh cargo-0.19.0/vendor/curl-0.4.6/ci/run.sh --- cargo-0.17.0/vendor/curl-0.4.6/ci/run.sh 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/curl-0.4.6/ci/run.sh 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,16 @@ +#!/bin/sh + +set -ex + +cargo test --target $TARGET --no-run +if [ -z "$NO_RUN" ]; then + cargo test --target $TARGET + cargo run --manifest-path systest/Cargo.toml --target $TARGET + cargo doc --no-deps + cargo doc --no-deps -p curl-sys +fi + +if [ -n "$FEATURES" ] +then + cargo run --manifest-path systest/Cargo.toml --target $TARGET --features "$FEATURES" +fi \ No newline at end of file diff -Nru cargo-0.17.0/vendor/curl-0.4.6/.gitignore cargo-0.19.0/vendor/curl-0.4.6/.gitignore --- cargo-0.17.0/vendor/curl-0.4.6/.gitignore 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/curl-0.4.6/.gitignore 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,2 @@ +Cargo.lock +target/ diff -Nru cargo-0.17.0/vendor/curl-0.4.6/.gitmodules cargo-0.19.0/vendor/curl-0.4.6/.gitmodules --- cargo-0.17.0/vendor/curl-0.4.6/.gitmodules 1970-01-01 00:00:00.000000000 +0000 +++ 
cargo-0.19.0/vendor/curl-0.4.6/.gitmodules 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,3 @@ +[submodule "curl-sys/curl"] + path = curl-sys/curl + url = https://github.com/alexcrichton/curl diff -Nru cargo-0.17.0/vendor/curl-0.4.6/LICENSE cargo-0.19.0/vendor/curl-0.4.6/LICENSE --- cargo-0.17.0/vendor/curl-0.4.6/LICENSE 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/curl-0.4.6/LICENSE 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,19 @@ +Copyright (c) 2014 Carl Lerche + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
diff -Nru cargo-0.17.0/vendor/curl-0.4.6/README.md cargo-0.19.0/vendor/curl-0.4.6/README.md --- cargo-0.17.0/vendor/curl-0.4.6/README.md 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/curl-0.4.6/README.md 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,137 @@ +# curl-rust + +libcurl bindings for Rust + +[![Build Status](https://travis-ci.org/alexcrichton/curl-rust.svg?branch=master)](https://travis-ci.org/alexcrichton/curl-rust) +[![Build status](https://ci.appveyor.com/api/projects/status/lx98wtbxhhhajpr9?svg=true)](https://ci.appveyor.com/project/alexcrichton/curl-rust) + +[Documentation](https://docs.rs/curl) + +## Quick Start + +```rust +extern crate curl; + +use std::io::{stdout, Write}; + +use curl::easy::Easy; + +// Print a web page onto stdout +fn main() { + let mut easy = Easy::new(); + easy.url("https://www.rust-lang.org/").unwrap(); + easy.write_function(|data| { + Ok(stdout().write(data).unwrap()) + }).unwrap(); + easy.perform().unwrap(); + + println!("{}", easy.response_code().unwrap()); +} +``` + +```rust +extern crate curl; + +use curl::easy::Easy; + +// Capture output into a local `Vec`. +fn main() { + let mut dst = Vec::new(); + let mut easy = Easy::new(); + easy.url("https://www.rust-lang.org/").unwrap(); + + let mut transfer = easy.transfer(); + transfer.write_function(|data| { + dst.extend_from_slice(data); + Ok(data.len()) + }).unwrap(); + transfer.perform().unwrap(); +} +``` + +## Post / Put requests + +The `put` and `post` methods on `Easy` can configure the method of the HTTP +request, and then `read_function` can be used to specify how data is filled in. +This interface works particularly well with types that implement `Read`. 
+ +```rust,no_run +extern crate curl; + +use std::io::Read; +use curl::easy::Easy; + +fn main() { + let mut data = "this is the body".as_bytes(); + + let mut easy = Easy::new(); + easy.url("http://www.example.com/upload").unwrap(); + easy.post(true).unwrap(); + easy.post_field_size(data.len() as u64).unwrap(); + + let mut transfer = easy.transfer(); + transfer.read_function(|buf| { + Ok(data.read(buf).unwrap_or(0)) + }).unwrap(); + transfer.perform().unwrap(); +} +``` + +## Custom headers + +Custom headers can be specified as part of the request: + +```rust,no_run +extern crate curl; + +use curl::easy::{Easy, List}; + +fn main() { + let mut easy = Easy::new(); + easy.url("http://www.example.com").unwrap(); + + let mut list = List::new(); + list.append("Authorization: Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ==").unwrap(); + easy.http_headers(list).unwrap(); + easy.perform().unwrap(); +} +``` + +## Keep alive + +The handle can be re-used across multiple requests. Curl will attempt to +keep the connections alive. + +```rust,no_run +extern crate curl; + +use curl::easy::Easy; + +fn main() { + let mut handle = Easy::new(); + + handle.url("http://www.example.com/foo").unwrap(); + handle.perform().unwrap(); + + handle.url("http://www.example.com/bar").unwrap(); + handle.perform().unwrap(); +} +``` + +## Multiple requests + +The libcurl library provides support for sending multiple requests +simultaneously through the "multi" interface. This is currently bound in the +`multi` module of this crate and provides the ability to execute multiple +transfers simultaneously. For more information, see that module. + +## Version Support + +The bindings have been developed using curl version 7.24.0. They should +work with any newer version of curl and possibly with older versions, +but this has not been tested. + +## License + +The `curl-rust` crate is licensed under the MIT license, see `LICENSE` for more +details. 
diff -Nru cargo-0.17.0/vendor/curl-0.4.6/src/easy.rs cargo-0.19.0/vendor/curl-0.4.6/src/easy.rs --- cargo-0.17.0/vendor/curl-0.4.6/src/easy.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/curl-0.4.6/src/easy.rs 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,3949 @@ +//! Bindings to the "easy" libcurl API. +//! +//! This module contains some simple types like `Easy` and `List` which are just +//! wrappers around the corresponding libcurl types. There's also a few enums +//! scattered about for various options here and there. +//! +//! Most simple usage of libcurl will likely use the `Easy` structure here, and +//! you can find more docs about its usage on that struct. + +use std::cell::{RefCell, Cell}; +use std::ffi::{CString, CStr}; +use std::io::SeekFrom; +use std::path::Path; +use std::slice; +use std::str; +use std::time::Duration; + +use curl_sys; +use libc::{self, c_long, c_int, c_char, c_void, size_t, c_double, c_ulong}; + +use {Error, FormError}; +use panic; + +// TODO: checked casts everywhere + +/// Raw bindings to a libcurl "easy session". +/// +/// This type corresponds to the `CURL` type in libcurl, and is probably what +/// you want for just sending off a simple HTTP request and fetching a response. +/// Each easy handle can be thought of as a large builder before calling the +/// final `perform` function. +/// +/// There are many many configuration options for each `Easy` handle, and they +/// should all have their own documentation indicating what it affects and how +/// it interacts with other options. Some implementations of libcurl can use +/// this handle to interact with many different protocols, although by default +/// this crate only guarantees the HTTP/HTTPS protocols working. +/// +/// Note that almost all methods on this structure which configure various +/// properties return a `Result`. This is largely used to detect whether the +/// underlying implementation of libcurl actually implements the option being +/// requested. 
If you're linked to a version of libcurl which doesn't support +/// the option, then an error will be returned. Some options also perform some +/// validation when they're set, and the error is returned through this vector. +/// +/// ## Examples +/// +/// Creating a handle which can be used later +/// +/// ``` +/// use curl::easy::Easy; +/// +/// let handle = Easy::new(); +/// ``` +/// +/// Send an HTTP request, writing the response to stdout. +/// +/// ``` +/// use std::io::{stdout, Write}; +/// +/// use curl::easy::Easy; +/// +/// let mut handle = Easy::new(); +/// handle.url("https://www.rust-lang.org/").unwrap(); +/// handle.write_function(|data| { +/// Ok(stdout().write(data).unwrap()) +/// }).unwrap(); +/// handle.perform().unwrap(); +/// ``` +/// +/// Collect all output of an HTTP request to a vector. +/// +/// ``` +/// use curl::easy::Easy; +/// +/// let mut data = Vec::new(); +/// let mut handle = Easy::new(); +/// handle.url("https://www.rust-lang.org/").unwrap(); +/// { +/// let mut transfer = handle.transfer(); +/// transfer.write_function(|new_data| { +/// data.extend_from_slice(new_data); +/// Ok(new_data.len()) +/// }).unwrap(); +/// transfer.perform().unwrap(); +/// } +/// println!("{:?}", data); +/// ``` +/// +/// More examples of various properties of an HTTP request can be found on the +/// specific methods as well. +pub struct Easy { + handle: *mut curl_sys::CURL, + data: Box, +} + +/// A scoped transfer of information which borrows an `Easy` and allows +/// referencing stack-local data of the lifetime `'data`. +/// +/// Usage of `Easy` requires the `'static` and `Send` bounds on all callbacks +/// registered, but that's not often wanted if all you need is to collect a +/// bunch of data in memory to a vector, for example. The `Transfer` structure, +/// created by the `Easy::transfer` method, is used for this sort of request. 
+/// +/// The callbacks attached to a `Transfer` are only active for that one transfer +/// object, and they allow to elide both the `Send` and `'static` bounds to +/// close over stack-local information. +pub struct Transfer<'easy, 'data> { + easy: &'easy mut Easy, + data: Box>, +} + +#[derive(Default)] +struct EasyData { + running: Cell, + write: Option Result + Send>>, + read: Option Result + Send>>, + seek: Option SeekResult + Send>>, + debug_set: bool, + debug: Option>, + header: Option bool + Send>>, + progress: Option bool + Send>>, + ssl_ctx: Option Result<(), Error> + Send>>, + header_list: Option, + form: Option, + error_buf: RefCell>, +} + +#[derive(Default)] +struct TransferData<'a> { + write: Option Result + 'a>>, + read: Option Result + 'a>>, + seek: Option SeekResult + 'a>>, + debug: Option>, + header: Option bool + 'a>>, + progress: Option bool + 'a>>, + ssl_ctx: Option Result<(), Error> + 'a>>, +} + +// libcurl guarantees that a CURL handle is fine to be transferred so long as +// it's not used concurrently, and we do that correctly ourselves. +unsafe impl Send for Easy {} + +/// Multipart/formdata for an HTTP POST request. +/// +/// This structure is built up and then passed to the `Easy::httppost` method to +/// be sent off with a request. +pub struct Form { + head: *mut curl_sys::curl_httppost, + tail: *mut curl_sys::curl_httppost, + headers: Vec, + buffers: Vec>, + strings: Vec, +} + +/// One part in a multipart upload, added to a `Form`. +pub struct Part<'form, 'data> { + form: &'form mut Form, + name: &'data str, + array: Vec, + error: Option, +} + +/// Possible proxy types that libcurl currently understands. 
+#[allow(missing_docs)] +pub enum ProxyType { + Http = curl_sys::CURLPROXY_HTTP as isize, + Http1 = curl_sys::CURLPROXY_HTTP_1_0 as isize, + Socks4 = curl_sys::CURLPROXY_SOCKS4 as isize, + Socks5 = curl_sys::CURLPROXY_SOCKS5 as isize, + Socks4a = curl_sys::CURLPROXY_SOCKS4A as isize, + Socks5Hostname = curl_sys::CURLPROXY_SOCKS5_HOSTNAME as isize, + + /// Hidden variant to indicate that this enum should not be matched on, it + /// may grow over time. + #[doc(hidden)] + __Nonexhaustive, +} + +/// Possible conditions for the `time_condition` method. +#[allow(missing_docs)] +pub enum TimeCondition { + None = curl_sys::CURL_TIMECOND_NONE as isize, + IfModifiedSince = curl_sys::CURL_TIMECOND_IFMODSINCE as isize, + IfUnmodifiedSince = curl_sys::CURL_TIMECOND_IFUNMODSINCE as isize, + LastModified = curl_sys::CURL_TIMECOND_LASTMOD as isize, + + /// Hidden variant to indicate that this enum should not be matched on, it + /// may grow over time. + #[doc(hidden)] + __Nonexhaustive, +} + +/// Possible values to pass to the `ip_resolve` method. +#[allow(missing_docs)] +pub enum IpResolve { + V4 = curl_sys::CURL_IPRESOLVE_V4 as isize, + V6 = curl_sys::CURL_IPRESOLVE_V6 as isize, + Any = curl_sys::CURL_IPRESOLVE_WHATEVER as isize, + + /// Hidden variant to indicate that this enum should not be matched on, it + /// may grow over time. + #[doc(hidden)] + __Nonexhaustive = 500, +} + +/// Possible values to pass to the `http_version` method. +pub enum HttpVersion { + /// We don't care what http version to use, and we'd like the library to + /// choose the best possible for us. 
+ Any = curl_sys::CURL_HTTP_VERSION_NONE as isize, + + /// Please use HTTP 1.0 in the request + V10 = curl_sys::CURL_HTTP_VERSION_1_0 as isize, + + /// Please use HTTP 1.1 in the request + V11 = curl_sys::CURL_HTTP_VERSION_1_1 as isize, + + /// Please use HTTP 2 in the request + /// (Added in CURL 7.33.0) + V2 = curl_sys::CURL_HTTP_VERSION_2_0 as isize, + + /// Use version 2 for HTTPS, version 1.1 for HTTP + /// (Added in CURL 7.47.0) + V2TLS = curl_sys::CURL_HTTP_VERSION_2TLS as isize, + + /// Please use HTTP 2 without HTTP/1.1 Upgrade + /// (Added in CURL 7.49.0) + V2PriorKnowledge = curl_sys::CURL_HTTP_VERSION_2_PRIOR_KNOWLEDGE as isize, + + /// Hidden variant to indicate that this enum should not be matched on, it + /// may grow over time. + #[doc(hidden)] + __Nonexhaustive = 500, +} + +/// Possible values to pass to the `ip_resolve` method. +#[allow(missing_docs)] +pub enum SslVersion { + Default = curl_sys::CURL_SSLVERSION_DEFAULT as isize, + Tlsv1 = curl_sys::CURL_SSLVERSION_TLSv1 as isize, + Sslv2 = curl_sys::CURL_SSLVERSION_SSLv2 as isize, + Sslv3 = curl_sys::CURL_SSLVERSION_SSLv3 as isize, + // Tlsv10 = curl_sys::CURL_SSLVERSION_TLSv1_0 as isize, + // Tlsv11 = curl_sys::CURL_SSLVERSION_TLSv1_1 as isize, + // Tlsv12 = curl_sys::CURL_SSLVERSION_TLSv1_2 as isize, + + /// Hidden variant to indicate that this enum should not be matched on, it + /// may grow over time. + #[doc(hidden)] + __Nonexhaustive = 500, +} + +/// Possible return values from the `seek_function` callback. +pub enum SeekResult { + /// Indicates that the seek operation was a success + Ok = curl_sys::CURL_SEEKFUNC_OK as isize, + + /// Indicates that the seek operation failed, and the entire request should + /// fail as a result. + Fail = curl_sys::CURL_SEEKFUNC_FAIL as isize, + + /// Indicates that although the seek failed libcurl should attempt to keep + /// working if possible (for example "seek" through reading). 
+ CantSeek = curl_sys::CURL_SEEKFUNC_CANTSEEK as isize, + + /// Hidden variant to indicate that this enum should not be matched on, it + /// may grow over time. + #[doc(hidden)] + __Nonexhaustive = 500, +} + +/// Possible data chunks that can be witnessed as part of the `debug_function` +/// callback. +pub enum InfoType { + /// The data is informational text. + Text, + + /// The data is header (or header-like) data received from the peer. + HeaderIn, + + /// The data is header (or header-like) data sent to the peer. + HeaderOut, + + /// The data is protocol data received from the peer. + DataIn, + + /// The data is protocol data sent to the peer. + DataOut, + + /// The data is SSL/TLS (binary) data received from the peer. + SslDataIn, + + /// The data is SSL/TLS (binary) data sent to the peer. + SslDataOut, + + /// Hidden variant to indicate that this enum should not be matched on, it + /// may grow over time. + #[doc(hidden)] + __Nonexhaustive, +} + +/// A linked list of a strings +pub struct List { + raw: *mut curl_sys::curl_slist, +} + +/// An iterator over `List` +pub struct Iter<'a> { + _me: &'a List, + cur: *mut curl_sys::curl_slist, +} + +unsafe impl Send for List {} + +/// Possible error codes that can be returned from the `read_function` callback. +pub enum ReadError { + /// Indicates that the connection should be aborted immediately + Abort, + + /// Indicates that reading should be paused until `unpause` is called. + Pause, + + /// Hidden variant to indicate that this enum should not be matched on, it + /// may grow over time. + #[doc(hidden)] + __Nonexhaustive, +} + +/// Possible error codes that can be returned from the `write_function` callback. +pub enum WriteError { + /// Indicates that reading should be paused until `unpause` is called. + Pause, + + /// Hidden variant to indicate that this enum should not be matched on, it + /// may grow over time. + #[doc(hidden)] + __Nonexhaustive, +} + +/// Options for `.netrc` parsing. 
+pub enum NetRc { + /// Ignoring `.netrc` file and use information from url + /// + /// This option is default + Ignored = curl_sys::CURL_NETRC_IGNORED as isize, + + /// The use of your `~/.netrc` file is optional, and information in the URL is to be + /// preferred. The file will be scanned for the host and user name (to find the password only) + /// or for the host only, to find the first user name and password after that machine, which + /// ever information is not specified in the URL. + Optional = curl_sys::CURL_NETRC_OPTIONAL as isize, + + /// This value tells the library that use of the file is required, to ignore the information in + /// the URL, and to search the file for the host only. + Required = curl_sys::CURL_NETRC_REQUIRED as isize, +} + +/// Structure which stores possible authentication methods to get passed to +/// `http_auth` and `proxy_auth`. +#[derive(Clone, Debug)] +pub struct Auth { + bits: c_long, +} + +/// Structure which stores possible ssl options to pass to `ssl_options`. +#[derive(Clone, Debug)] +pub struct SslOpt { + bits: c_long, +} + +impl Easy { + /// Creates a new "easy" handle which is the core of almost all operations + /// in libcurl. + /// + /// To use a handle, applications typically configure a number of options + /// followed by a call to `perform`. Options are preserved across calls to + /// `perform` and need to be reset manually (or via the `reset` method) if + /// this is not desired. + pub fn new() -> Easy { + ::init(); + unsafe { + let handle = curl_sys::curl_easy_init(); + assert!(!handle.is_null()); + let mut ret = Easy { + handle: handle, + data: Default::default(), + }; + default_configure(&mut ret); + return ret + } + } + + // ========================================================================= + // Behavior options + + /// Configures this handle to have verbose output to help debug protocol + /// information. 
+ /// + /// By default output goes to stderr, but the `stderr` function on this type + /// can configure that. You can also use the `debug_function` method to get + /// all protocol data sent and received. + /// + /// By default, this option is `false`. + pub fn verbose(&mut self, verbose: bool) -> Result<(), Error> { + self.setopt_long(curl_sys::CURLOPT_VERBOSE, verbose as c_long) + } + + /// Indicates whether header information is streamed to the output body of + /// this request. + /// + /// This option is only relevant for protocols which have header metadata + /// (like http or ftp). It's not generally possible to extract headers + /// from the body if using this method, that use case should be intended for + /// the `header_function` method. + /// + /// To set HTTP headers, use the `http_header` method. + /// + /// By default, this option is `false` and corresponds to + /// `CURLOPT_HEADER`. + pub fn show_header(&mut self, show: bool) -> Result<(), Error> { + self.setopt_long(curl_sys::CURLOPT_HEADER, show as c_long) + } + + /// Indicates whether a progress meter will be shown for requests done with + /// this handle. + /// + /// This will also prevent the `progress_function` from being called. + /// + /// By default this option is `false` and corresponds to + /// `CURLOPT_NOPROGRESS`. + pub fn progress(&mut self, progress: bool) -> Result<(), Error> { + self.setopt_long(curl_sys::CURLOPT_NOPROGRESS, + (!progress) as c_long) + } + + /// Inform libcurl whether or not it should install signal handlers or + /// attempt to use signals to perform library functions. + /// + /// If this option is disabled then timeouts during name resolution will not + /// work unless libcurl is built against c-ares. Note that enabling this + /// option, however, may not cause libcurl to work with multiple threads. + /// + /// By default this option is `false` and corresponds to `CURLOPT_NOSIGNAL`. 
+ /// Note that this default is **different than libcurl** as it is intended + /// that this library is threadsafe by default. See the [libcurl docs] for + /// some more information. + /// + /// [libcurl docs]: https://curl.haxx.se/libcurl/c/threadsafe.html + pub fn signal(&mut self, signal: bool) -> Result<(), Error> { + self.setopt_long(curl_sys::CURLOPT_NOSIGNAL, + (!signal) as c_long) + } + + /// Indicates whether multiple files will be transferred based on the file + /// name pattern. + /// + /// The last part of a filename uses fnmatch-like pattern matching. + /// + /// By default this option is `false` and corresponds to + /// `CURLOPT_WILDCARDMATCH`. + pub fn wildcard_match(&mut self, m: bool) -> Result<(), Error> { + self.setopt_long(curl_sys::CURLOPT_WILDCARDMATCH, m as c_long) + } + + // ========================================================================= + // Callback options + + /// Set callback for writing received data. + /// + /// This callback function gets called by libcurl as soon as there is data + /// received that needs to be saved. + /// + /// The callback function will be passed as much data as possible in all + /// invokes, but you must not make any assumptions. It may be one byte, it + /// may be thousands. If `show_header` is enabled, which makes header data + /// get passed to the write callback, you can get up to + /// `CURL_MAX_HTTP_HEADER` bytes of header data passed into it. This + /// usually means 100K. + /// + /// This function may be called with zero bytes data if the transferred file + /// is empty. + /// + /// The callback should return the number of bytes actually taken care of. + /// If that amount differs from the amount passed to your callback function, + /// it'll signal an error condition to the library. This will cause the + /// transfer to get aborted and the libcurl function used will return + /// an error with `is_write_error`. 
+ /// + /// If your callback function returns `Err(WriteError::Pause)` it will cause + /// this transfer to become paused. See `unpause_write` for further details. + /// + /// By default data is sent into the void, and this corresponds to the + /// `CURLOPT_WRITEFUNCTION` and `CURLOPT_WRITEDATA` options. + /// + /// Note that the lifetime bound on this function is `'static`, but that + /// is often too restrictive. To use stack data consider calling the + /// `transfer` method and then using `write_function` to configure a + /// callback that can reference stack-local data. + /// + /// # Examples + /// + /// ``` + /// use std::io::{stdout, Write}; + /// use curl::easy::Easy; + /// + /// let mut handle = Easy::new(); + /// handle.url("https://www.rust-lang.org/").unwrap(); + /// handle.write_function(|data| { + /// Ok(stdout().write(data).unwrap()) + /// }).unwrap(); + /// handle.perform().unwrap(); + /// ``` + /// + /// Writing to a stack-local buffer + /// + /// ``` + /// use std::io::{stdout, Write}; + /// use curl::easy::Easy; + /// + /// let mut buf = Vec::new(); + /// let mut handle = Easy::new(); + /// handle.url("https://www.rust-lang.org/").unwrap(); + /// + /// let mut transfer = handle.transfer(); + /// transfer.write_function(|data| { + /// buf.extend_from_slice(data); + /// Ok(data.len()) + /// }).unwrap(); + /// transfer.perform().unwrap(); + /// ``` + pub fn write_function(&mut self, f: F) -> Result<(), Error> + where F: FnMut(&[u8]) -> Result + Send + 'static + { + self.data.write = Some(Box::new(f)); + unsafe { + return self.set_write_function(easy_write_cb, + &*self.data as *const _ as *mut _) + } + } + + unsafe fn set_write_function(&self, + cb: curl_sys::curl_write_callback, + ptr: *mut c_void) + -> Result<(), Error> { + try!(self.setopt_ptr(curl_sys::CURLOPT_WRITEFUNCTION, cb as *const _)); + try!(self.setopt_ptr(curl_sys::CURLOPT_WRITEDATA, ptr as *const _)); + return Ok(()); + } + + /// Read callback for data uploads. 
+ /// + /// This callback function gets called by libcurl as soon as it needs to + /// read data in order to send it to the peer - like if you ask it to upload + /// or post data to the server. + /// + /// Your function must then return the actual number of bytes that it stored + /// in that memory area. Returning 0 will signal end-of-file to the library + /// and cause it to stop the current transfer. + /// + /// If you stop the current transfer by returning 0 "pre-maturely" (i.e + /// before the server expected it, like when you've said you will upload N + /// bytes and you upload less than N bytes), you may experience that the + /// server "hangs" waiting for the rest of the data that won't come. + /// + /// The read callback may return `Err(ReadError::Abort)` to stop the + /// current operation immediately, resulting in a `is_aborted_by_callback` + /// error code from the transfer. + /// + /// The callback can return `Err(ReadError::Pause)` to cause reading from + /// this connection to pause. See `unpause_read` for further details. + /// + /// By default data not input, and this corresponds to the + /// `CURLOPT_READFUNCTION` and `CURLOPT_READDATA` options. + /// + /// Note that the lifetime bound on this function is `'static`, but that + /// is often too restrictive. To use stack data consider calling the + /// `transfer` method and then using `read_function` to configure a + /// callback that can reference stack-local data. 
+ /// + /// # Examples + /// + /// Read input from stdin + /// + /// ```no_run + /// use std::io::{stdin, Read}; + /// use curl::easy::Easy; + /// + /// let mut handle = Easy::new(); + /// handle.url("https://example.com/login").unwrap(); + /// handle.read_function(|into| { + /// Ok(stdin().read(into).unwrap()) + /// }).unwrap(); + /// handle.post(true).unwrap(); + /// handle.perform().unwrap(); + /// ``` + /// + /// Reading from stack-local data: + /// + /// ```no_run + /// use std::io::{stdin, Read}; + /// use curl::easy::Easy; + /// + /// let mut data_to_upload = &b"foobar"[..]; + /// let mut handle = Easy::new(); + /// handle.url("https://example.com/login").unwrap(); + /// handle.post(true).unwrap(); + /// + /// let mut transfer = handle.transfer(); + /// transfer.read_function(|into| { + /// Ok(data_to_upload.read(into).unwrap()) + /// }).unwrap(); + /// transfer.perform().unwrap(); + /// ``` + pub fn read_function(&mut self, f: F) -> Result<(), Error> + where F: FnMut(&mut [u8]) -> Result + Send + 'static + { + self.data.read = Some(Box::new(f)); + unsafe { + self.set_read_function(easy_read_cb, + &*self.data as *const _ as *mut _) + } + } + + unsafe fn set_read_function(&self, + cb: curl_sys::curl_read_callback, + ptr: *mut c_void) -> Result<(), Error> { + try!(self.setopt_ptr(curl_sys::CURLOPT_READFUNCTION, cb as *const _)); + try!(self.setopt_ptr(curl_sys::CURLOPT_READDATA, ptr as *const _)); + return Ok(()); + } + + /// User callback for seeking in input stream. + /// + /// This function gets called by libcurl to seek to a certain position in + /// the input stream and can be used to fast forward a file in a resumed + /// upload (instead of reading all uploaded bytes with the normal read + /// function/callback). It is also called to rewind a stream when data has + /// already been sent to the server and needs to be sent again. 
This may + /// happen when doing a HTTP PUT or POST with a multi-pass authentication + /// method, or when an existing HTTP connection is reused too late and the + /// server closes the connection. + /// + /// The callback function must return `SeekResult::Ok` on success, + /// `SeekResult::Fail` to cause the upload operation to fail or + /// `SeekResult::CantSeek` to indicate that while the seek failed, libcurl + /// is free to work around the problem if possible. The latter can sometimes + /// be done by instead reading from the input or similar. + /// + /// By default data this option is not set, and this corresponds to the + /// `CURLOPT_SEEKFUNCTION` and `CURLOPT_SEEKDATA` options. + /// + /// Note that the lifetime bound on this function is `'static`, but that + /// is often too restrictive. To use stack data consider calling the + /// `transfer` method and then using `seek_function` to configure a + /// callback that can reference stack-local data. + pub fn seek_function(&mut self, f: F) -> Result<(), Error> + where F: FnMut(SeekFrom) -> SeekResult + Send + 'static + { + self.data.seek = Some(Box::new(f)); + unsafe { + self.set_seek_function(easy_seek_cb, + &*self.data as *const _ as *mut _) + } + } + + unsafe fn set_seek_function(&self, + cb: curl_sys::curl_seek_callback, + ptr: *mut c_void) -> Result<(), Error> { + let cb = cb as curl_sys::curl_seek_callback; + try!(self.setopt_ptr(curl_sys::CURLOPT_SEEKFUNCTION, cb as *const _)); + try!(self.setopt_ptr(curl_sys::CURLOPT_SEEKDATA, ptr as *const _)); + Ok(()) + } + + /// Callback to progress meter function + /// + /// This function gets called by libcurl instead of its internal equivalent + /// with a frequent interval. While data is being transferred it will be + /// called very frequently, and during slow periods like when nothing is + /// being transferred it can slow down to about one call per second. 
+ /// + /// The callback gets told how much data libcurl will transfer and has + /// transferred, in number of bytes. The first argument is the total number + /// of bytes libcurl expects to download in this transfer. The second + /// argument is the number of bytes downloaded so far. The third argument is + /// the total number of bytes libcurl expects to upload in this transfer. + /// The fourth argument is the number of bytes uploaded so far. + /// + /// Unknown/unused argument values passed to the callback will be set to + /// zero (like if you only download data, the upload size will remain 0). + /// Many times the callback will be called one or more times first, before + /// it knows the data sizes so a program must be made to handle that. + /// + /// Returning `false` from this callback will cause libcurl to abort the + /// transfer and return `is_aborted_by_callback`. + /// + /// If you transfer data with the multi interface, this function will not be + /// called during periods of idleness unless you call the appropriate + /// libcurl function that performs transfers. + /// + /// `noprogress` must be set to 0 to make this function actually get + /// called. + /// + /// By default this function calls an internal method and corresponds to + /// `CURLOPT_PROGRESSFUNCTION` and `CURLOPT_PROGRESSDATA`. + /// + /// Note that the lifetime bound on this function is `'static`, but that + /// is often too restrictive. To use stack data consider calling the + /// `transfer` method and then using `progress_function` to configure a + /// callback that can reference stack-local data. 
+ pub fn progress_function(&mut self, f: F) -> Result<(), Error> + where F: FnMut(f64, f64, f64, f64) -> bool + Send + 'static + { + self.data.progress = Some(Box::new(f)); + unsafe { + self.set_progress_function(easy_progress_cb, + &*self.data as *const _ as *mut _) + } + } + + unsafe fn set_progress_function(&self, + cb: curl_sys::curl_progress_callback, + ptr: *mut c_void) -> Result<(), Error> { + try!(self.setopt_ptr(curl_sys::CURLOPT_PROGRESSFUNCTION, cb as *const _)); + try!(self.setopt_ptr(curl_sys::CURLOPT_PROGRESSDATA, ptr as *const _)); + Ok(()) + } + + /// Callback to SSL context + /// + /// This callback function gets called by libcurl just before the + /// initialization of an SSL connection after having processed all + /// other SSL related options to give a last chance to an + /// application to modify the behaviour of the SSL + /// initialization. The `ssl_ctx` parameter is actually a pointer + /// to the SSL library's SSL_CTX. If an error is returned from the + /// callback no attempt to establish a connection is made and the + /// perform operation will return the callback's error code. + /// + /// This function will get called on all new connections made to a + /// server, during the SSL negotiation. The SSL_CTX pointer will + /// be a new one every time. + /// + /// To use this properly, a non-trivial amount of knowledge of + /// your SSL library is necessary. For example, you can use this + /// function to call library-specific callbacks to add additional + /// validation code for certificates, and even to change the + /// actual URI of a HTTPS request. + /// + /// By default this function calls an internal method and + /// corresponds to `CURLOPT_SSL_CTX_FUNCTION` and + /// `CURLOPT_SSL_CTX_DATA`. + /// + /// Note that the lifetime bound on this function is `'static`, but that + /// is often too restrictive. 
To use stack data consider calling the + /// `transfer` method and then using `progress_function` to configure a + /// callback that can reference stack-local data. + pub fn ssl_ctx_function(&mut self, f: F) -> Result<(), Error> + where F: FnMut(*mut c_void) -> Result<(), Error> + Send + 'static + { + self.data.ssl_ctx = Some(Box::new(f)); + unsafe { + self.set_ssl_ctx_function(easy_ssl_ctx_cb, + &*self.data as *const _ as *mut _) + } + } + + unsafe fn set_ssl_ctx_function(&self, + cb: curl_sys::curl_ssl_ctx_callback, + ptr: *mut c_void) -> Result<(), Error> { + try!(self.setopt_ptr(curl_sys::CURLOPT_SSL_CTX_FUNCTION, cb as *const _)); + try!(self.setopt_ptr(curl_sys::CURLOPT_SSL_CTX_DATA, ptr as *const _)); + Ok(()) + } + + /// Specify a debug callback + /// + /// `debug_function` replaces the standard debug function used when + /// `verbose` is in effect. This callback receives debug information, + /// as specified in the type argument. + /// + /// By default this option is not set and corresponds to the + /// `CURLOPT_DEBUGFUNCTION` and `CURLOPT_DEBUGDATA` options. + /// + /// Note that the lifetime bound on this function is `'static`, but that + /// is often too restrictive. To use stack data consider calling the + /// `transfer` method and then using `debug_function` to configure a + /// callback that can reference stack-local data. 
+ pub fn debug_function(&mut self, f: F) -> Result<(), Error> + where F: FnMut(InfoType, &[u8]) + Send + 'static + { + self.data.debug = Some(Box::new(f)); + self.data.debug_set = true; + unsafe { + self.set_debug_function(easy_debug_cb, + &*self.data as *const _ as *mut _) + } + } + + unsafe fn set_debug_function(&self, + cb: curl_sys::curl_debug_callback, + ptr: *mut c_void) -> Result<(), Error> { + try!(self.setopt_ptr(curl_sys::CURLOPT_DEBUGFUNCTION, cb as *const _)); + try!(self.setopt_ptr(curl_sys::CURLOPT_DEBUGDATA, ptr as *const _)); + return Ok(()); + } + + /// Callback that receives header data + /// + /// This function gets called by libcurl as soon as it has received header + /// data. The header callback will be called once for each header and only + /// complete header lines are passed on to the callback. Parsing headers is + /// very easy using this. If this callback returns `false` it'll signal an + /// error to the library. This will cause the transfer to get aborted and + /// the libcurl function in progress will return `is_write_error`. + /// + /// A complete HTTP header that is passed to this function can be up to + /// CURL_MAX_HTTP_HEADER (100K) bytes. + /// + /// It's important to note that the callback will be invoked for the headers + /// of all responses received after initiating a request and not just the + /// final response. This includes all responses which occur during + /// authentication negotiation. If you need to operate on only the headers + /// from the final response, you will need to collect headers in the + /// callback yourself and use HTTP status lines, for example, to delimit + /// response boundaries. + /// + /// When a server sends a chunked encoded transfer, it may contain a + /// trailer. That trailer is identical to a HTTP header and if such a + /// trailer is received it is passed to the application using this callback + /// as well. 
There are several ways to detect it being a trailer and not an + /// ordinary header: 1) it comes after the response-body. 2) it comes after + /// the final header line (CR LF) 3) a Trailer: header among the regular + /// response-headers mention what header(s) to expect in the trailer. + /// + /// For non-HTTP protocols like FTP, POP3, IMAP and SMTP this function will + /// get called with the server responses to the commands that libcurl sends. + /// + /// By default this option is not set and corresponds to the + /// `CURLOPT_HEADERFUNCTION` and `CURLOPT_HEADERDATA` options. + /// + /// Note that the lifetime bound on this function is `'static`, but that + /// is often too restrictive. To use stack data consider calling the + /// `transfer` method and then using `header_function` to configure a + /// callback that can reference stack-local data. + /// + /// # Examples + /// + /// ``` + /// use std::str; + /// + /// use curl::easy::Easy; + /// + /// let mut handle = Easy::new(); + /// handle.url("https://www.rust-lang.org/").unwrap(); + /// handle.header_function(|header| { + /// print!("header: {}", str::from_utf8(header).unwrap()); + /// true + /// }).unwrap(); + /// handle.perform().unwrap(); + /// ``` + /// + /// Collecting headers to a stack local vector + /// + /// ``` + /// use std::str; + /// + /// use curl::easy::Easy; + /// + /// let mut headers = Vec::new(); + /// let mut handle = Easy::new(); + /// handle.url("https://www.rust-lang.org/").unwrap(); + /// + /// { + /// let mut transfer = handle.transfer(); + /// transfer.header_function(|header| { + /// headers.push(str::from_utf8(header).unwrap().to_string()); + /// true + /// }).unwrap(); + /// transfer.perform().unwrap(); + /// } + /// + /// println!("{:?}", headers); + /// ``` + pub fn header_function(&mut self, f: F) -> Result<(), Error> + where F: FnMut(&[u8]) -> bool + Send + 'static + { + self.data.header = Some(Box::new(f)); + unsafe { + self.set_header_function(easy_header_cb, + &*self.data 
as *const _ as *mut _) + } + } + + // TODO: shouldn't there be a libcurl typedef for this? + unsafe fn set_header_function(&self, + cb: extern fn(*mut c_char, + size_t, + size_t, + *mut c_void) -> size_t, + ptr: *mut c_void) -> Result<(), Error> { + try!(self.setopt_ptr(curl_sys::CURLOPT_HEADERFUNCTION, cb as *const _)); + try!(self.setopt_ptr(curl_sys::CURLOPT_HEADERDATA, ptr as *const _)); + Ok(()) + } + + // ========================================================================= + // Error options + + // TODO: error buffer and stderr + + /// Indicates whether this library will fail on HTTP response codes >= 400. + /// + /// This method is not fail-safe especially when authentication is involved. + /// + /// By default this option is `false` and corresponds to + /// `CURLOPT_FAILONERROR`. + pub fn fail_on_error(&mut self, fail: bool) -> Result<(), Error> { + self.setopt_long(curl_sys::CURLOPT_FAILONERROR, fail as c_long) + } + + // ========================================================================= + // Network options + + /// Provides the URL which this handle will work with. + /// + /// The string provided must be URL-encoded with the format: + /// + /// ```text + /// scheme://host:port/path + /// ``` + /// + /// The syntax is not validated as part of this function and that is + /// deferred until later. + /// + /// By default this option is not set and `perform` will not work until it + /// is set. This option corresponds to `CURLOPT_URL`. + pub fn url(&mut self, url: &str) -> Result<(), Error> { + let url = try!(CString::new(url)); + self.setopt_str(curl_sys::CURLOPT_URL, &url) + } + + /// Configures the port number to connect to, instead of the one specified + /// in the URL or the default of the protocol. + pub fn port(&mut self, port: u16) -> Result<(), Error> { + self.setopt_long(curl_sys::CURLOPT_PORT, port as c_long) + } + + // /// Indicates whether sequences of `/../` and `/./` will be squashed or not. 
+ // /// + // /// By default this option is `false` and corresponds to + // /// `CURLOPT_PATH_AS_IS`. + // pub fn path_as_is(&mut self, as_is: bool) -> Result<(), Error> { + // } + + /// Provide the URL of a proxy to use. + /// + /// By default this option is not set and corresponds to `CURLOPT_PROXY`. + pub fn proxy(&mut self, url: &str) -> Result<(), Error> { + let url = try!(CString::new(url)); + self.setopt_str(curl_sys::CURLOPT_PROXY, &url) + } + + /// Provide port number the proxy is listening on. + /// + /// By default this option is not set (the default port for the proxy + /// protocol is used) and corresponds to `CURLOPT_PROXYPORT`. + pub fn proxy_port(&mut self, port: u16) -> Result<(), Error> { + self.setopt_long(curl_sys::CURLOPT_PROXYPORT, port as c_long) + } + + /// Indicates the type of proxy being used. + /// + /// By default this option is `ProxyType::Http` and corresponds to + /// `CURLOPT_PROXYTYPE`. + pub fn proxy_type(&mut self, kind: ProxyType) -> Result<(), Error> { + self.setopt_long(curl_sys::CURLOPT_PROXYTYPE, kind as c_long) + } + + /// Provide a list of hosts that should not be proxied to. + /// + /// This string is a comma-separated list of hosts which should not use the + /// proxy specified for connections. A single `*` character is also accepted + /// as a wildcard for all hosts. + /// + /// By default this option is not set and corresponds to + /// `CURLOPT_NOPROXY`. + pub fn noproxy(&mut self, skip: &str) -> Result<(), Error> { + let skip = try!(CString::new(skip)); + self.setopt_str(curl_sys::CURLOPT_PROXYTYPE, &skip) + } + + /// Inform curl whether it should tunnel all operations through the proxy. + /// + /// This essentially means that a `CONNECT` is sent to the proxy for all + /// outbound requests. + /// + /// By default this option is `false` and corresponds to + /// `CURLOPT_HTTPPROXYTUNNEL`. 
+ pub fn http_proxy_tunnel(&mut self, tunnel: bool) -> Result<(), Error> { + self.setopt_long(curl_sys::CURLOPT_HTTPPROXYTUNNEL, + tunnel as c_long) + } + + /// Tell curl which interface to bind to for an outgoing network interface. + /// + /// The interface name, IP address, or host name can be specified here. + /// + /// By default this option is not set and corresponds to + /// `CURLOPT_INTERFACE`. + pub fn interface(&mut self, interface: &str) -> Result<(), Error> { + let s = try!(CString::new(interface)); + self.setopt_str(curl_sys::CURLOPT_INTERFACE, &s) + } + + /// Indicate which port should be bound to locally for this connection. + /// + /// By default this option is 0 (any port) and corresponds to + /// `CURLOPT_LOCALPORT`. + pub fn set_local_port(&mut self, port: u16) -> Result<(), Error> { + self.setopt_long(curl_sys::CURLOPT_LOCALPORT, port as c_long) + } + + /// Indicates the number of attempts libcurl will perform to find a working + /// port number. + /// + /// By default this option is 1 and corresponds to + /// `CURLOPT_LOCALPORTRANGE`. + pub fn local_port_range(&mut self, range: u16) -> Result<(), Error> { + self.setopt_long(curl_sys::CURLOPT_LOCALPORTRANGE, + range as c_long) + } + + /// Sets the timeout of how long name resolves will be kept in memory. + /// + /// This is distinct from DNS TTL options and is entirely speculative. + /// + /// By default this option is 60s and corresponds to + /// `CURLOPT_DNS_CACHE_TIMEOUT`. + pub fn dns_cache_timeout(&mut self, dur: Duration) -> Result<(), Error> { + self.setopt_long(curl_sys::CURLOPT_DNS_CACHE_TIMEOUT, + dur.as_secs() as c_long) + } + + /// Specify the preferred receive buffer size, in bytes. + /// + /// This is treated as a request, not an order, and the main point of this + /// is that the write callback may get called more often with smaller + /// chunks. + /// + /// By default this option is the maximum write size and corresopnds to + /// `CURLOPT_BUFFERSIZE`. 
+ pub fn buffer_size(&mut self, size: usize) -> Result<(), Error> { + self.setopt_long(curl_sys::CURLOPT_BUFFERSIZE, size as c_long) + } + + // /// Enable or disable TCP Fast Open + // /// + // /// By default this options defaults to `false` and corresponds to + // /// `CURLOPT_TCP_FASTOPEN` + // pub fn fast_open(&mut self, enable: bool) -> Result<(), Error> { + // } + + /// Configures whether the TCP_NODELAY option is set, or Nagle's algorithm + /// is disabled. + /// + /// The purpose of Nagle's algorithm is to minimize the number of small + /// packet's on the network, and disabling this may be less efficient in + /// some situations. + /// + /// By default this option is `false` and corresponds to + /// `CURLOPT_TCP_NODELAY`. + pub fn tcp_nodelay(&mut self, enable: bool) -> Result<(), Error> { + self.setopt_long(curl_sys::CURLOPT_TCP_NODELAY, enable as c_long) + } + + // /// Configures whether TCP keepalive probes will be sent. + // /// + // /// The delay and frequency of these probes is controlled by `tcp_keepidle` + // /// and `tcp_keepintvl`. + // /// + // /// By default this option is `false` and corresponds to + // /// `CURLOPT_TCP_KEEPALIVE`. + // pub fn tcp_keepalive(&mut self, enable: bool) -> Result<(), Error> { + // self.setopt_long(curl_sys::CURLOPT_TCP_KEEPALIVE, enable as c_long) + // } + + // /// Configures the TCP keepalive idle time wait. + // /// + // /// This is the delay, after which the connection is idle, keepalive probes + // /// will be sent. Not all operating systems support this. + // /// + // /// By default this corresponds to `CURLOPT_TCP_KEEPIDLE`. + // pub fn tcp_keepidle(&mut self, amt: Duration) -> Result<(), Error> { + // self.setopt_long(curl_sys::CURLOPT_TCP_KEEPIDLE, + // amt.as_secs() as c_long) + // } + // + // /// Configures the delay between keepalive probes. + // /// + // /// By default this corresponds to `CURLOPT_TCP_KEEPINTVL`. 
+ // pub fn tcp_keepintvl(&mut self, amt: Duration) -> Result<(), Error> { + // self.setopt_long(curl_sys::CURLOPT_TCP_KEEPINTVL, + // amt.as_secs() as c_long) + // } + + /// Configures the scope for local IPv6 addresses. + /// + /// Sets the scope_id value to use when connecting to IPv6 or link-local + /// addresses. + /// + /// By default this value is 0 and corresponds to `CURLOPT_ADDRESS_SCOPE` + pub fn address_scope(&mut self, scope: u32) -> Result<(), Error> { + self.setopt_long(curl_sys::CURLOPT_ADDRESS_SCOPE, + scope as c_long) + } + + // ========================================================================= + // Names and passwords + + /// Configures the username to pass as authentication for this connection. + /// + /// By default this value is not set and corresponds to `CURLOPT_USERNAME`. + pub fn username(&mut self, user: &str) -> Result<(), Error> { + let user = try!(CString::new(user)); + self.setopt_str(curl_sys::CURLOPT_USERNAME, &user) + } + + /// Configures the password to pass as authentication for this connection. + /// + /// By default this value is not set and corresponds to `CURLOPT_PASSWORD`. + pub fn password(&mut self, pass: &str) -> Result<(), Error> { + let pass = try!(CString::new(pass)); + self.setopt_str(curl_sys::CURLOPT_PASSWORD, &pass) + } + + /// Set HTTP server authentication methods to try + /// + /// If more than one method is set, libcurl will first query the site to see + /// which authentication methods it supports and then pick the best one you + /// allow it to use. For some methods, this will induce an extra network + /// round-trip. Set the actual name and password with the `password` and + /// `username` methods. + /// + /// For authentication with a proxy, see `proxy_auth`. + /// + /// By default this value is basic and corresponds to `CURLOPT_HTTPAUTH`. 
+ pub fn http_auth(&mut self, auth: &Auth) -> Result<(), Error> { + self.setopt_long(curl_sys::CURLOPT_HTTPAUTH, auth.bits) + } + + /// Configures the proxy username to pass as authentication for this + /// connection. + /// + /// By default this value is not set and corresponds to + /// `CURLOPT_PROXYUSERNAME`. + pub fn proxy_username(&mut self, user: &str) -> Result<(), Error> { + let user = try!(CString::new(user)); + self.setopt_str(curl_sys::CURLOPT_PROXYUSERNAME, &user) + } + + /// Configures the proxy password to pass as authentication for this + /// connection. + /// + /// By default this value is not set and corresponds to + /// `CURLOPT_PROXYPASSWORD`. + pub fn proxy_password(&mut self, pass: &str) -> Result<(), Error> { + let pass = try!(CString::new(pass)); + self.setopt_str(curl_sys::CURLOPT_PROXYPASSWORD, &pass) + } + + /// Set HTTP proxy authentication methods to try + /// + /// If more than one method is set, libcurl will first query the site to see + /// which authentication methods it supports and then pick the best one you + /// allow it to use. For some methods, this will induce an extra network + /// round-trip. Set the actual name and password with the `proxy_password` + /// and `proxy_username` methods. + /// + /// By default this value is basic and corresponds to `CURLOPT_PROXYAUTH`. + pub fn proxy_auth(&mut self, auth: &Auth) -> Result<(), Error> { + self.setopt_long(curl_sys::CURLOPT_PROXYAUTH, auth.bits) + } + + /// Enable .netrc parsing + /// + /// By default the .netrc file is ignored and corresponds to `CURL_NETRC_IGNORED`. + pub fn netrc(&mut self, netrc: NetRc) -> Result<(), Error> { + self.setopt_long(curl_sys::CURLOPT_NETRC, netrc as c_long) + } + + // ========================================================================= + // HTTP Options + + /// Indicates whether the referer header is automatically updated + /// + /// By default this option is `false` and corresponds to + /// `CURLOPT_AUTOREFERER`. 
+ pub fn autoreferer(&mut self, enable: bool) -> Result<(), Error> { + self.setopt_long(curl_sys::CURLOPT_AUTOREFERER, enable as c_long) + } + + /// Enables automatic decompression of HTTP downloads. + /// + /// Sets the contents of the Accept-Encoding header sent in an HTTP request. + /// This enables decoding of a response with Content-Encoding. + /// + /// Currently supported encoding are `identity`, `zlib`, and `gzip`. A + /// zero-length string passed in will send all accepted encodings. + /// + /// By default this option is not set and corresponds to + /// `CURLOPT_ACCEPT_ENCODING`. + pub fn accept_encoding(&mut self, encoding: &str) -> Result<(), Error> { + let encoding = try!(CString::new(encoding)); + self.setopt_str(curl_sys::CURLOPT_ACCEPT_ENCODING, &encoding) + } + + /// Request the HTTP Transfer Encoding. + /// + /// By default this option is `false` and corresponds to + /// `CURLOPT_TRANSFER_ENCODING`. + pub fn transfer_encoding(&mut self, enable: bool) -> Result<(), Error> { + self.setopt_long(curl_sys::CURLOPT_TRANSFER_ENCODING, enable as c_long) + } + + /// Follow HTTP 3xx redirects. + /// + /// Indicates whether any `Location` headers in the response should get + /// followed. + /// + /// By default this option is `false` and corresponds to + /// `CURLOPT_FOLLOWLOCATION`. + pub fn follow_location(&mut self, enable: bool) -> Result<(), Error> { + self.setopt_long(curl_sys::CURLOPT_FOLLOWLOCATION, enable as c_long) + } + + /// Send credentials to hosts other than the first as well. + /// + /// Sends username/password credentials even when the host changes as part + /// of a redirect. + /// + /// By default this option is `false` and corresponds to + /// `CURLOPT_UNRESTRICTED_AUTH`. + pub fn unrestricted_auth(&mut self, enable: bool) -> Result<(), Error> { + self.setopt_long(curl_sys::CURLOPT_UNRESTRICTED_AUTH, enable as c_long) + } + + /// Set the maximum number of redirects allowed. + /// + /// A value of 0 will refuse any redirect. 
+ /// + /// By default this option is `-1` (unlimited) and corresponds to + /// `CURLOPT_MAXREDIRS`. + pub fn max_redirections(&mut self, max: u32) -> Result<(), Error> { + self.setopt_long(curl_sys::CURLOPT_MAXREDIRS, max as c_long) + } + + // TODO: post_redirections + + /// Make an HTTP PUT request. + /// + /// By default this option is `false` and corresponds to `CURLOPT_PUT`. + pub fn put(&mut self, enable: bool) -> Result<(), Error> { + self.setopt_long(curl_sys::CURLOPT_PUT, enable as c_long) + } + + /// Make an HTTP POST request. + /// + /// This will also make the library use the + /// `Content-Type: application/x-www-form-urlencoded` header. + /// + /// POST data can be specified through `post_fields` or by specifying a read + /// function. + /// + /// By default this option is `false` and corresponds to `CURLOPT_POST`. + pub fn post(&mut self, enable: bool) -> Result<(), Error> { + self.setopt_long(curl_sys::CURLOPT_POST, enable as c_long) + } + + /// Configures the data that will be uploaded as part of a POST. + /// + /// Note that the data is copied into this handle and if that's not desired + /// then the read callbacks can be used instead. + /// + /// By default this option is not set and corresponds to + /// `CURLOPT_COPYPOSTFIELDS`. + pub fn post_fields_copy(&mut self, data: &[u8]) -> Result<(), Error> { + // Set the length before the pointer so libcurl knows how much to read + try!(self.post_field_size(data.len() as u64)); + self.setopt_ptr(curl_sys::CURLOPT_COPYPOSTFIELDS, + data.as_ptr() as *const _) + } + + /// Configures the size of data that's going to be uploaded as part of a + /// POST operation. + /// + /// This is called automaticsally as part of `post_fields` and should only + /// be called if data is being provided in a read callback (and even then + /// it's optional). + /// + /// By default this option is not set and corresponds to + /// `CURLOPT_POSTFIELDSIZE_LARGE`. 
+ pub fn post_field_size(&mut self, size: u64) -> Result<(), Error> { + // Clear anything previous to ensure we don't read past a buffer + try!(self.setopt_ptr(curl_sys::CURLOPT_POSTFIELDS, 0 as *const _)); + self.setopt_off_t(curl_sys::CURLOPT_POSTFIELDSIZE_LARGE, + size as curl_sys::curl_off_t) + } + + /// Tells libcurl you want a multipart/formdata HTTP POST to be made and you + /// instruct what data to pass on to the server in the `form` argument. + /// + /// By default this option is set to null and corresponds to + /// `CURLOPT_HTTPPOST`. + pub fn httppost(&mut self, form: Form) -> Result<(), Error> { + try!(self.setopt_ptr(curl_sys::CURLOPT_HTTPPOST, + form.head as *const _)); + self.data.form = Some(form); + Ok(()) + } + + /// Sets the HTTP referer header + /// + /// By default this option is not set and corresponds to `CURLOPT_REFERER`. + pub fn referer(&mut self, referer: &str) -> Result<(), Error> { + let referer = try!(CString::new(referer)); + self.setopt_str(curl_sys::CURLOPT_REFERER, &referer) + } + + /// Sets the HTTP user-agent header + /// + /// By default this option is not set and corresponds to + /// `CURLOPT_USERAGENT`. + pub fn useragent(&mut self, useragent: &str) -> Result<(), Error> { + let useragent = try!(CString::new(useragent)); + self.setopt_str(curl_sys::CURLOPT_USERAGENT, &useragent) + } + + /// Add some headers to this HTTP request. + /// + /// If you add a header that is otherwise used internally, the value here + /// takes precedence. If a header is added with no content (like `Accept:`) + /// the internally the header will get disabled. To add a header with no + /// content, use the form `MyHeader;` (not the trailing semicolon). + /// + /// Headers must not be CRLF terminated. Many replaced headers have common + /// shortcuts which should be prefered. 
+ /// + /// By default this option is not set and corresponds to + /// `CURLOPT_HTTPHEADER` + /// + /// # Examples + /// + /// ``` + /// use curl::easy::{Easy, List}; + /// + /// let mut list = List::new(); + /// list.append("Foo: bar").unwrap(); + /// list.append("Bar: baz").unwrap(); + /// + /// let mut handle = Easy::new(); + /// handle.url("https://www.rust-lang.org/").unwrap(); + /// handle.http_headers(list).unwrap(); + /// handle.perform().unwrap(); + /// ``` + pub fn http_headers(&mut self, list: List) -> Result<(), Error> { + let ptr = list.raw; + self.data.header_list = Some(list); + self.setopt_ptr(curl_sys::CURLOPT_HTTPHEADER, ptr as *const _) + } + + // /// Add some headers to send to the HTTP proxy. + // /// + // /// This function is essentially the same as `http_headers`. + // /// + // /// By default this option is not set and corresponds to + // /// `CURLOPT_PROXYHEADER` + // pub fn proxy_headers(&mut self, list: &'a List) -> Result<(), Error> { + // self.setopt_ptr(curl_sys::CURLOPT_PROXYHEADER, list.raw as *const _) + // } + + /// Set the contents of the HTTP Cookie header. + /// + /// Pass a string of the form `name=contents` for one cookie value or + /// `name1=val1; name2=val2` for multiple values. + /// + /// Using this option multiple times will only make the latest string + /// override the previous ones. This option will not enable the cookie + /// engine, use `cookie_file` or `cookie_jar` to do that. + /// + /// By default this option is not set and corresponds to `CURLOPT_COOKIE`. + pub fn cookie(&mut self, cookie: &str) -> Result<(), Error> { + let cookie = try!(CString::new(cookie)); + self.setopt_str(curl_sys::CURLOPT_COOKIE, &cookie) + } + + /// Set the file name to read cookies from. + /// + /// The cookie data can be in either the old Netscape / Mozilla cookie data + /// format or just regular HTTP headers (Set-Cookie style) dumped to a file. 
+ /// + /// This also enables the cookie engine, making libcurl parse and send + /// cookies on subsequent requests with this handle. + /// + /// Given an empty or non-existing file or by passing the empty string ("") + /// to this option, you can enable the cookie engine without reading any + /// initial cookies. + /// + /// If you use this option multiple times, you just add more files to read. + /// Subsequent files will add more cookies. + /// + /// By default this option is not set and corresponds to + /// `CURLOPT_COOKIEFILE`. + pub fn cookie_file>(&mut self, file: P) -> Result<(), Error> { + self.setopt_path(curl_sys::CURLOPT_COOKIEFILE, file.as_ref()) + } + + /// Set the file name to store cookies to. + /// + /// This will make libcurl write all internally known cookies to the file + /// when this handle is dropped. If no cookies are known, no file will be + /// created. Specify "-" as filename to instead have the cookies written to + /// stdout. Using this option also enables cookies for this session, so if + /// you for example follow a location it will make matching cookies get sent + /// accordingly. + /// + /// Note that libcurl doesn't read any cookies from the cookie jar. If you + /// want to read cookies from a file, use `cookie_file`. + /// + /// By default this option is not set and corresponds to + /// `CURLOPT_COOKIEJAR`. + pub fn cookie_jar>(&mut self, file: P) -> Result<(), Error> { + self.setopt_path(curl_sys::CURLOPT_COOKIEJAR, file.as_ref()) + } + + /// Start a new cookie session + /// + /// Marks this as a new cookie "session". It will force libcurl to ignore + /// all cookies it is about to load that are "session cookies" from the + /// previous session. By default, libcurl always stores and loads all + /// cookies, independent if they are session cookies or not. Session cookies + /// are cookies without expiry date and they are meant to be alive and + /// existing for this "session" only. 
+ /// + /// By default this option is `false` and corresponds to + /// `CURLOPT_COOKIESESSION`. + pub fn cookie_session(&mut self, session: bool) -> Result<(), Error> { + self.setopt_long(curl_sys::CURLOPT_COOKIESESSION, session as c_long) + } + + /// Add to or manipulate cookies held in memory. + /// + /// Such a cookie can be either a single line in Netscape / Mozilla format + /// or just regular HTTP-style header (Set-Cookie: ...) format. This will + /// also enable the cookie engine. This adds that single cookie to the + /// internal cookie store. + /// + /// Exercise caution if you are using this option and multiple transfers may + /// occur. If you use the Set-Cookie format and don't specify a domain then + /// the cookie is sent for any domain (even after redirects are followed) + /// and cannot be modified by a server-set cookie. If a server sets a cookie + /// of the same name (or maybe you've imported one) then both will be sent + /// on a future transfer to that server, likely not what you intended. + /// address these issues set a domain in Set-Cookie or use the Netscape + /// format. + /// + /// Additionally, there are commands available that perform actions if you + /// pass in these exact strings: + /// + /// * "ALL" - erases all cookies held in memory + /// * "SESS" - erases all session cookies held in memory + /// * "FLUSH" - write all known cookies to the specified cookie jar + /// * "RELOAD" - reread all cookies from the cookie file + /// + /// By default this options corresponds to `CURLOPT_COOKIELIST` + pub fn cookie_list(&mut self, cookie: &str) -> Result<(), Error> { + let cookie = try!(CString::new(cookie)); + self.setopt_str(curl_sys::CURLOPT_COOKIELIST, &cookie) + } + + /// Ask for a HTTP GET request. + /// + /// By default this option is `false` and corresponds to `CURLOPT_HTTPGET`. 
+ pub fn get(&mut self, enable: bool) -> Result<(), Error> { + self.setopt_long(curl_sys::CURLOPT_HTTPGET, enable as c_long) + } + + // /// Ask for a HTTP GET request. + // /// + // /// By default this option is `false` and corresponds to `CURLOPT_HTTPGET`. + // pub fn http_version(&mut self, vers: &str) -> Result<(), Error> { + // self.setopt_long(curl_sys::CURLOPT_HTTPGET, enable as c_long) + // } + + /// Ignore the content-length header. + /// + /// By default this option is `false` and corresponds to + /// `CURLOPT_IGNORE_CONTENT_LENGTH`. + pub fn ignore_content_length(&mut self, ignore: bool) -> Result<(), Error> { + self.setopt_long(curl_sys::CURLOPT_IGNORE_CONTENT_LENGTH, + ignore as c_long) + } + + /// Enable or disable HTTP content decoding. + /// + /// By default this option is `true` and corresponds to + /// `CURLOPT_HTTP_CONTENT_DECODING`. + pub fn http_content_decoding(&mut self, enable: bool) -> Result<(), Error> { + self.setopt_long(curl_sys::CURLOPT_HTTP_CONTENT_DECODING, + enable as c_long) + } + + /// Enable or disable HTTP transfer decoding. + /// + /// By default this option is `true` and corresponds to + /// `CURLOPT_HTTP_TRANSFER_DECODING`. + pub fn http_transfer_decoding(&mut self, enable: bool) -> Result<(), Error> { + self.setopt_long(curl_sys::CURLOPT_HTTP_TRANSFER_DECODING, + enable as c_long) + } + + // /// Timeout for the Expect: 100-continue response + // /// + // /// By default this option is 1s and corresponds to + // /// `CURLOPT_EXPECT_100_TIMEOUT_MS`. + // pub fn expect_100_timeout(&mut self, enable: bool) -> Result<(), Error> { + // self.setopt_long(curl_sys::CURLOPT_HTTP_TRANSFER_DECODING, + // enable as c_long) + // } + + // /// Wait for pipelining/multiplexing. + // /// + // /// Tells libcurl to prefer to wait for a connection to confirm or deny that + // /// it can do pipelining or multiplexing before continuing. 
+ // /// + // /// When about to perform a new transfer that allows pipelining or + // /// multiplexing, libcurl will check for existing connections to re-use and + // /// pipeline on. If no such connection exists it will immediately continue + // /// and create a fresh new connection to use. + // /// + // /// By setting this option to `true` - having `pipeline` enabled for the + // /// multi handle this transfer is associated with - libcurl will instead + // /// wait for the connection to reveal if it is possible to + // /// pipeline/multiplex on before it continues. This enables libcurl to much + // /// better keep the number of connections to a minimum when using pipelining + // /// or multiplexing protocols. + // /// + // /// The effect thus becomes that with this option set, libcurl prefers to + // /// wait and re-use an existing connection for pipelining rather than the + // /// opposite: prefer to open a new connection rather than waiting. + // /// + // /// The waiting time is as long as it takes for the connection to get up and + // /// for libcurl to get the necessary response back that informs it about its + // /// protocol and support level. + // pub fn http_pipewait(&mut self, enable: bool) -> Result<(), Error> { + // } + + + // ========================================================================= + // Protocol Options + + /// Indicates the range that this request should retrieve. + /// + /// The string provided should be of the form `N-M` where either `N` or `M` + /// can be left out. For HTTP transfers multiple ranges separated by commas + /// are also accepted. + /// + /// By default this option is not set and corresponds to `CURLOPT_RANGE`. + pub fn range(&mut self, range: &str) -> Result<(), Error> { + let range = try!(CString::new(range)); + self.setopt_str(curl_sys::CURLOPT_RANGE, &range) + } + + /// Set a point to resume transfer from + /// + /// Specify the offset in bytes you want the transfer to start from. 
+ /// + /// By default this option is 0 and corresponds to + /// `CURLOPT_RESUME_FROM_LARGE`. + pub fn resume_from(&mut self, from: u64) -> Result<(), Error> { + self.setopt_off_t(curl_sys::CURLOPT_RESUME_FROM_LARGE, + from as curl_sys::curl_off_t) + } + + /// Set a custom request string + /// + /// Specifies that a custom request will be made (e.g. a custom HTTP + /// method). This does not change how libcurl performs internally, just + /// changes the string sent to the server. + /// + /// By default this option is not set and corresponds to + /// `CURLOPT_CUSTOMREQUEST`. + pub fn custom_request(&mut self, request: &str) -> Result<(), Error> { + let request = try!(CString::new(request)); + self.setopt_str(curl_sys::CURLOPT_CUSTOMREQUEST, &request) + } + + /// Get the modification time of the remote resource + /// + /// If true, libcurl will attempt to get the modification time of the + /// remote document in this operation. This requires that the remote server + /// sends the time or replies to a time querying command. The `filetime` + /// function can be used after a transfer to extract the received time (if + /// any). + /// + /// By default this option is `false` and corresponds to `CURLOPT_FILETIME` + pub fn fetch_filetime(&mut self, fetch: bool) -> Result<(), Error> { + self.setopt_long(curl_sys::CURLOPT_FILETIME, fetch as c_long) + } + + /// Indicate whether to download the request without getting the body + /// + /// This is useful, for example, for doing a HEAD request. + /// + /// By default this option is `false` and corresponds to `CURLOPT_NOBODY`. + pub fn nobody(&mut self, enable: bool) -> Result<(), Error> { + self.setopt_long(curl_sys::CURLOPT_NOBODY, enable as c_long) + } + + /// Set the size of the input file to send off. + /// + /// By default this option is not set and corresponds to + /// `CURLOPT_INFILESIZE_LARGE`. 
+ pub fn in_filesize(&mut self, size: u64) -> Result<(), Error> { + self.setopt_off_t(curl_sys::CURLOPT_INFILESIZE_LARGE, + size as curl_sys::curl_off_t) + } + + /// Enable or disable data upload. + /// + /// This means that a PUT request will be made for HTTP and probably wants + /// to be combined with the read callback as well as the `in_filesize` + /// method. + /// + /// By default this option is `false` and corresponds to `CURLOPT_UPLOAD`. + pub fn upload(&mut self, enable: bool) -> Result<(), Error> { + self.setopt_long(curl_sys::CURLOPT_UPLOAD, enable as c_long) + } + + /// Configure the maximum file size to download. + /// + /// By default this option is not set and corresponds to + /// `CURLOPT_MAXFILESIZE_LARGE`. + pub fn max_filesize(&mut self, size: u64) -> Result<(), Error> { + self.setopt_off_t(curl_sys::CURLOPT_MAXFILESIZE_LARGE, + size as curl_sys::curl_off_t) + } + + /// Selects a condition for a time request. + /// + /// This value indicates how the `time_value` option is interpreted. + /// + /// By default this option is not set and corresponds to + /// `CURLOPT_TIMECONDITION`. + pub fn time_condition(&mut self, cond: TimeCondition) -> Result<(), Error> { + self.setopt_long(curl_sys::CURLOPT_TIMECONDITION, cond as c_long) + } + + /// Sets the time value for a conditional request. + /// + /// The value here should be the number of seconds elapsed since January 1, + /// 1970. To pass how to interpret this value, use `time_condition`. + /// + /// By default this option is not set and corresponds to + /// `CURLOPT_TIMEVALUE`. + pub fn time_value(&mut self, val: i64) -> Result<(), Error> { + self.setopt_long(curl_sys::CURLOPT_TIMEVALUE, val as c_long) + } + + // ========================================================================= + // Connection Options + + /// Set maximum time the request is allowed to take. 
+ /// + /// Normally, name lookups can take a considerable time and limiting + /// operations to less than a few minutes risk aborting perfectly normal + /// operations. + /// + /// If libcurl is built to use the standard system name resolver, that + /// portion of the transfer will still use full-second resolution for + /// timeouts with a minimum timeout allowed of one second. + /// + /// In unix-like systems, this might cause signals to be used unless + /// `nosignal` is set. + /// + /// Since this puts a hard limit for how long a request is allowed to + /// take, it has limited use in dynamic use cases with varying transfer + /// times. You are then advised to explore `low_speed_limit`, + /// `low_speed_time` or using `progress_function` to implement your own + /// timeout logic. + /// + /// By default this option is not set and corresponds to + /// `CURLOPT_TIMEOUT_MS`. + pub fn timeout(&mut self, timeout: Duration) -> Result<(), Error> { + // TODO: checked arithmetic and casts + // TODO: use CURLOPT_TIMEOUT if the timeout is too great + let ms = timeout.as_secs() * 1000 + + (timeout.subsec_nanos() / 1_000_000) as u64; + self.setopt_long(curl_sys::CURLOPT_TIMEOUT_MS, ms as c_long) + + } + + /// Set the low speed limit in bytes per second. + /// + /// This specifies the average transfer speed in bytes per second that the + /// transfer should be below during `low_speed_time` for libcurl to consider + /// it to be too slow and abort. + /// + /// By default this option is not set and corresponds to + /// `CURLOPT_LOW_SPEED_LIMIT`. + pub fn low_speed_limit(&mut self, limit: u32) -> Result<(), Error> { + self.setopt_long(curl_sys::CURLOPT_LOW_SPEED_LIMIT, limit as c_long) + } + + /// Set the low speed time period. + /// + /// Specifies the window of time for which if the transfer rate is below + /// `low_speed_limit` the request will be aborted. + /// + /// By default this option is not set and corresponds to + /// `CURLOPT_LOW_SPEED_TIME`. 
+ pub fn low_speed_time(&mut self, dur: Duration) -> Result<(), Error> { + self.setopt_long(curl_sys::CURLOPT_LOW_SPEED_TIME, + dur.as_secs() as c_long) + } + + /// Rate limit data upload speed + /// + /// If an upload exceeds this speed (counted in bytes per second) on + /// cumulative average during the transfer, the transfer will pause to keep + /// the average rate less than or equal to the parameter value. + /// + /// By default this option is not set (unlimited speed) and corresponds to + /// `CURLOPT_MAX_SEND_SPEED_LARGE`. + pub fn max_send_speed(&mut self, speed: u64) -> Result<(), Error> { + self.setopt_off_t(curl_sys::CURLOPT_MAX_SEND_SPEED_LARGE, + speed as curl_sys::curl_off_t) + } + + /// Rate limit data download speed + /// + /// If a download exceeds this speed (counted in bytes per second) on + /// cumulative average during the transfer, the transfer will pause to keep + /// the average rate less than or equal to the parameter value. + /// + /// By default this option is not set (unlimited speed) and corresponds to + /// `CURLOPT_MAX_RECV_SPEED_LARGE`. + pub fn max_recv_speed(&mut self, speed: u64) -> Result<(), Error> { + self.setopt_off_t(curl_sys::CURLOPT_MAX_RECV_SPEED_LARGE, + speed as curl_sys::curl_off_t) + } + + /// Set the maximum connection cache size. + /// + /// The set amount will be the maximum number of simultaneously open + /// persistent connections that libcurl may cache in the pool associated + /// with this handle. The default is 5, and there isn't much point in + /// changing this value unless you are perfectly aware of how this works and + /// changes libcurl's behaviour. This concerns connections using any of the + /// protocols that support persistent connections. + /// + /// When reaching the maximum limit, curl closes the oldest one in the cache + /// to prevent increasing the number of open connections. 
+ /// + /// By default this option is set to 5 and corresponds to + /// `CURLOPT_MAXCONNECTS` + pub fn max_connects(&mut self, max: u32) -> Result<(), Error> { + self.setopt_long(curl_sys::CURLOPT_MAXCONNECTS, max as c_long) + } + + /// Force a new connection to be used. + /// + /// Makes the next transfer use a new (fresh) connection by force instead of + /// trying to re-use an existing one. This option should be used with + /// caution and only if you understand what it does as it may seriously + /// impact performance. + /// + /// By default this option is `false` and corresponds to + /// `CURLOPT_FRESH_CONNECT`. + pub fn fresh_connect(&mut self, enable: bool) -> Result<(), Error> { + self.setopt_long(curl_sys::CURLOPT_FRESH_CONNECT, enable as c_long) + } + + /// Make connection get closed at once after use. + /// + /// Makes libcurl explicitly close the connection when done with the + /// transfer. Normally, libcurl keeps all connections alive when done with + /// one transfer in case a succeeding one follows that can re-use them. + /// This option should be used with caution and only if you understand what + /// it does as it can seriously impact performance. + /// + /// By default this option is `false` and corresponds to + /// `CURLOPT_FORBID_REUSE`. + pub fn forbid_reuse(&mut self, enable: bool) -> Result<(), Error> { + self.setopt_long(curl_sys::CURLOPT_FORBID_REUSE, enable as c_long) + } + + /// Timeout for the connect phase + /// + /// This is the maximum time that you allow the connection phase to the + /// server to take. This only limits the connection phase, it has no impact + /// once it has connected. + /// + /// By default this value is 300 seconds and corresponds to + /// `CURLOPT_CONNECTTIMEOUT_MS`. 
+ pub fn connect_timeout(&mut self, timeout: Duration) -> Result<(), Error> { + let ms = timeout.as_secs() * 1000 + + (timeout.subsec_nanos() / 1_000_000) as u64; + self.setopt_long(curl_sys::CURLOPT_CONNECTTIMEOUT_MS, ms as c_long) + } + + /// Specify which IP protocol version to use + /// + /// Allows an application to select what kind of IP addresses to use when + /// resolving host names. This is only interesting when using host names + /// that resolve addresses using more than one version of IP. + /// + /// By default this value is "any" and corresponds to `CURLOPT_IPRESOLVE`. + pub fn ip_resolve(&mut self, resolve: IpResolve) -> Result<(), Error> { + self.setopt_long(curl_sys::CURLOPT_IPRESOLVE, resolve as c_long) + } + + /// Configure whether to stop when connected to target server + /// + /// When enabled it tells the library to perform all the required proxy + /// authentication and connection setup, but no data transfer, and then + /// return. + /// + /// The option can be used to simply test a connection to a server. + /// + /// By default this value is `false` and corresponds to + /// `CURLOPT_CONNECT_ONLY`. + pub fn connect_only(&mut self, enable: bool) -> Result<(), Error> { + self.setopt_long(curl_sys::CURLOPT_CONNECT_ONLY, enable as c_long) + } + + // /// Set interface to speak DNS over. + // /// + // /// Set the name of the network interface that the DNS resolver should bind + // /// to. This must be an interface name (not an address). + // /// + // /// By default this option is not set and corresponds to + // /// `CURLOPT_DNS_INTERFACE`. + // pub fn dns_interface(&mut self, interface: &str) -> Result<(), Error> { + // let interface = try!(CString::new(interface)); + // self.setopt_str(curl_sys::CURLOPT_DNS_INTERFACE, &interface) + // } + // + // /// IPv4 address to bind DNS resolves to + // /// + // /// Set the local IPv4 address that the resolver should bind to. 
The + // /// argument should be of type char * and contain a single numerical IPv4 + // /// address as a string. + // /// + // /// By default this option is not set and corresponds to + // /// `CURLOPT_DNS_LOCAL_IP4`. + // pub fn dns_local_ip4(&mut self, ip: &str) -> Result<(), Error> { + // let ip = try!(CString::new(ip)); + // self.setopt_str(curl_sys::CURLOPT_DNS_LOCAL_IP4, &ip) + // } + // + // /// IPv6 address to bind DNS resolves to + // /// + // /// Set the local IPv6 address that the resolver should bind to. The + // /// argument should be of type char * and contain a single numerical IPv6 + // /// address as a string. + // /// + // /// By default this option is not set and corresponds to + // /// `CURLOPT_DNS_LOCAL_IP6`. + // pub fn dns_local_ip6(&mut self, ip: &str) -> Result<(), Error> { + // let ip = try!(CString::new(ip)); + // self.setopt_str(curl_sys::CURLOPT_DNS_LOCAL_IP6, &ip) + // } + // + // /// Set preferred DNS servers. + // /// + // /// Provides a list of DNS servers to be used instead of the system default. + // /// The format of the dns servers option is: + // /// + // /// ```text + // /// host[:port],[host[:port]]... + // /// ``` + // /// + // /// By default this option is not set and corresponds to + // /// `CURLOPT_DNS_SERVERS`. + // pub fn dns_servers(&mut self, servers: &str) -> Result<(), Error> { + // let servers = try!(CString::new(servers)); + // self.setopt_str(curl_sys::CURLOPT_DNS_SERVERS, &servers) + // } + + // ========================================================================= + // SSL/Security Options + + /// Sets the SSL client certificate. + /// + /// The string should be the file name of your client certificate. The + /// default format is "P12" on Secure Transport and "PEM" on other engines, + /// and can be changed with `ssl_cert_type`. + /// + /// With NSS or Secure Transport, this can also be the nickname of the + /// certificate you wish to authenticate with as it is named in the security + /// database. 
If you want to use a file from the current directory, please + /// precede it with "./" prefix, in order to avoid confusion with a + /// nickname. + /// + /// When using a client certificate, you most likely also need to provide a + /// private key with `ssl_key`. + /// + /// By default this option is not set and corresponds to `CURLOPT_SSLCERT`. + pub fn ssl_cert>(&mut self, cert: P) -> Result<(), Error> { + self.setopt_path(curl_sys::CURLOPT_SSLCERT, cert.as_ref()) + } + + /// Specify type of the client SSL certificate. + /// + /// The string should be the format of your certificate. Supported formats + /// are "PEM" and "DER", except with Secure Transport. OpenSSL (versions + /// 0.9.3 and later) and Secure Transport (on iOS 5 or later, or OS X 10.7 + /// or later) also support "P12" for PKCS#12-encoded files. + /// + /// By default this option is "PEM" and corresponds to + /// `CURLOPT_SSLCERTTYPE`. + pub fn ssl_cert_type(&mut self, kind: &str) -> Result<(), Error> { + let kind = try!(CString::new(kind)); + self.setopt_str(curl_sys::CURLOPT_SSLCERTTYPE, &kind) + } + + /// Specify private keyfile for TLS and SSL client cert. + /// + /// The string should be the file name of your private key. The default + /// format is "PEM" and can be changed with `ssl_key_type`. + /// + /// (iOS and Mac OS X only) This option is ignored if curl was built against + /// Secure Transport. Secure Transport expects the private key to be already + /// present in the keychain or PKCS#12 file containing the certificate. + /// + /// By default this option is not set and corresponds to `CURLOPT_SSLKEY`. + pub fn ssl_key>(&mut self, key: P) -> Result<(), Error> { + self.setopt_path(curl_sys::CURLOPT_SSLKEY, key.as_ref()) + } + + /// Set type of the private key file. + /// + /// The string should be the format of your private key. Supported formats + /// are "PEM", "DER" and "ENG". + /// + /// The format "ENG" enables you to load the private key from a crypto + /// engine. 
In this case `ssl_key` is used as an identifier passed to + /// the engine. You have to set the crypto engine with `ssl_engine`. + /// "DER" format key file currently does not work because of a bug in + /// OpenSSL. + /// + /// By default this option is "PEM" and corresponds to + /// `CURLOPT_SSLKEYTYPE`. + pub fn ssl_key_type(&mut self, kind: &str) -> Result<(), Error> { + let kind = try!(CString::new(kind)); + self.setopt_str(curl_sys::CURLOPT_SSLKEYTYPE, &kind) + } + + /// Set passphrase to private key. + /// + /// This will be used as the password required to use the `ssl_key`. + /// You never needed a pass phrase to load a certificate but you need one to + /// load your private key. + /// + /// By default this option is not set and corresponds to + /// `CURLOPT_KEYPASSWD`. + pub fn key_password(&mut self, password: &str) -> Result<(), Error> { + let password = try!(CString::new(password)); + self.setopt_str(curl_sys::CURLOPT_KEYPASSWD, &password) + } + + /// Set the SSL engine identifier. + /// + /// This will be used as the identifier for the crypto engine you want to + /// use for your private key. + /// + /// By default this option is not set and corresponds to + /// `CURLOPT_SSLENGINE`. + pub fn ssl_engine(&mut self, engine: &str) -> Result<(), Error> { + let engine = try!(CString::new(engine)); + self.setopt_str(curl_sys::CURLOPT_SSLENGINE, &engine) + } + + /// Make this handle's SSL engine the default. + /// + /// By default this option is not set and corresponds to + /// `CURLOPT_SSLENGINE_DEFAULT`. + pub fn ssl_engine_default(&mut self, enable: bool) -> Result<(), Error> { + self.setopt_long(curl_sys::CURLOPT_SSLENGINE_DEFAULT, enable as c_long) + } + + // /// Enable TLS false start. + // /// + // /// This option determines whether libcurl should use false start during the + // /// TLS handshake. 
False start is a mode where a TLS client will start + // /// sending application data before verifying the server's Finished message, + // /// thus saving a round trip when performing a full handshake. + // /// + // /// By default this option is not set and corresponds to + // /// `CURLOPT_SSL_FALSESTARTE`. + // pub fn ssl_false_start(&mut self, enable: bool) -> Result<(), Error> { + // self.setopt_long(curl_sys::CURLOPT_SSLENGINE_DEFAULT, enable as c_long) + // } + + /// Set preferred HTTP version. + /// + /// By default this option is not set and corresponds to + /// `CURLOPT_HTTP_VERSION`. + pub fn http_version(&mut self, version: HttpVersion) -> Result<(), Error> { + self.setopt_long(curl_sys::CURLOPT_HTTP_VERSION, version as c_long) + } + + /// Set preferred TLS/SSL version. + /// + /// By default this option is not set and corresponds to + /// `CURLOPT_SSLVERSION`. + pub fn ssl_version(&mut self, version: SslVersion) -> Result<(), Error> { + self.setopt_long(curl_sys::CURLOPT_SSLVERSION, version as c_long) + } + + /// Verify the certificate's name against host. + /// + /// This should be disabled with great caution! It basically disables the + /// security features of SSL if it is disabled. + /// + /// By default this option is set to `true` and corresponds to + /// `CURLOPT_SSL_VERIFYHOST`. + pub fn ssl_verify_host(&mut self, verify: bool) -> Result<(), Error> { + let val = if verify {2} else {0}; + self.setopt_long(curl_sys::CURLOPT_SSL_VERIFYHOST, val) + } + + /// Verify the peer's SSL certificate. + /// + /// This should be disabled with great caution! It basically disables the + /// security features of SSL if it is disabled. + /// + /// By default this option is set to `true` and corresponds to + /// `CURLOPT_SSL_VERIFYPEER`. + pub fn ssl_verify_peer(&mut self, verify: bool) -> Result<(), Error> { + self.setopt_long(curl_sys::CURLOPT_SSL_VERIFYPEER, verify as c_long) + } + + // /// Verify the certificate's status. 
+ // /// + // /// This option determines whether libcurl verifies the status of the server + // /// cert using the "Certificate Status Request" TLS extension (aka. OCSP + // /// stapling). + // /// + // /// By default this option is set to `false` and corresponds to + // /// `CURLOPT_SSL_VERIFYSTATUS`. + // pub fn ssl_verify_status(&mut self, verify: bool) -> Result<(), Error> { + // self.setopt_long(curl_sys::CURLOPT_SSL_VERIFYSTATUS, verify as c_long) + // } + + /// Specify the path to Certificate Authority (CA) bundle + /// + /// The file referenced should hold one or more certificates to verify the + /// peer with. + /// + /// This option is by default set to the system path where libcurl's cacert + /// bundle is assumed to be stored, as established at build time. + /// + /// If curl is built against the NSS SSL library, the NSS PEM PKCS#11 module + /// (libnsspem.so) needs to be available for this option to work properly. + /// + /// By default this option is the system defaults, and corresponds to + /// `CURLOPT_CAINFO`. + pub fn cainfo>(&mut self, path: P) -> Result<(), Error> { + self.setopt_path(curl_sys::CURLOPT_CAINFO, path.as_ref()) + } + + /// Set the issuer SSL certificate filename + /// + /// Specifies a file holding a CA certificate in PEM format. If the option + /// is set, an additional check against the peer certificate is performed to + /// verify the issuer is indeed the one associated with the certificate + /// provided by the option. This additional check is useful in multi-level + /// PKI where one needs to enforce that the peer certificate is from a + /// specific branch of the tree. + /// + /// This option makes sense only when used in combination with the + /// `ssl_verify_peer` option. Otherwise, the result of the check is not + /// considered as failure. + /// + /// By default this option is not set and corresponds to + /// `CURLOPT_ISSUERCERT`. 
+ pub fn issuer_cert>(&mut self, path: P) -> Result<(), Error> { + self.setopt_path(curl_sys::CURLOPT_ISSUERCERT, path.as_ref()) + } + + /// Specify directory holding CA certificates + /// + /// Names a directory holding multiple CA certificates to verify the peer + /// with. If libcurl is built against OpenSSL, the certificate directory + /// must be prepared using the openssl c_rehash utility. This makes sense + /// only when used in combination with the `ssl_verify_peer` option. + /// + /// By default this option is not set and corresponds to `CURLOPT_CAPATH`. + pub fn capath>(&mut self, path: P) -> Result<(), Error> { + self.setopt_path(curl_sys::CURLOPT_CAPATH, path.as_ref()) + } + + /// Specify a Certificate Revocation List file + /// + /// Names a file with the concatenation of CRL (in PEM format) to use in the + /// certificate validation that occurs during the SSL exchange. + /// + /// When curl is built to use NSS or GnuTLS, there is no way to influence + /// the use of CRL passed to help in the verification process. When libcurl + /// is built with OpenSSL support, X509_V_FLAG_CRL_CHECK and + /// X509_V_FLAG_CRL_CHECK_ALL are both set, requiring CRL check against all + /// the elements of the certificate chain if a CRL file is passed. + /// + /// This option makes sense only when used in combination with the + /// `ssl_verify_peer` option. + /// + /// A specific error code (`is_ssl_crl_badfile`) is defined with the + /// option. It is returned when the SSL exchange fails because the CRL file + /// cannot be loaded. A failure in certificate verification due to a + /// revocation information found in the CRL does not trigger this specific + /// error. + /// + /// By default this option is not set and corresponds to `CURLOPT_CRLFILE`. 
+ pub fn crlfile>(&mut self, path: P) -> Result<(), Error> { + self.setopt_path(curl_sys::CURLOPT_CRLFILE, path.as_ref()) + } + + /// Request SSL certificate information + /// + /// Enable libcurl's certificate chain info gatherer. With this enabled, + /// libcurl will extract lots of information and data about the certificates + /// in the certificate chain used in the SSL connection. + /// + /// By default this option is `false` and corresponds to + /// `CURLOPT_CERTINFO`. + pub fn certinfo(&mut self, enable: bool) -> Result<(), Error> { + self.setopt_long(curl_sys::CURLOPT_CERTINFO, enable as c_long) + } + + // /// Set pinned public key. + // /// + // /// Pass a pointer to a zero terminated string as parameter. The string can + // /// be the file name of your pinned public key. The file format expected is + // /// "PEM" or "DER". The string can also be any number of base64 encoded + // /// sha256 hashes preceded by "sha256//" and separated by ";" + // /// + // /// When negotiating a TLS or SSL connection, the server sends a certificate + // /// indicating its identity. A public key is extracted from this certificate + // /// and if it does not exactly match the public key provided to this option, + // /// curl will abort the connection before sending or receiving any data. + // /// + // /// By default this option is not set and corresponds to + // /// `CURLOPT_PINNEDPUBLICKEY`. + // pub fn pinned_public_key(&mut self, enable: bool) -> Result<(), Error> { + // self.setopt_long(curl_sys::CURLOPT_CERTINFO, enable as c_long) + // } + + /// Specify a source for random data + /// + /// The file will be used to read from to seed the random engine for SSL and + /// more. + /// + /// By default this option is not set and corresponds to + /// `CURLOPT_RANDOM_FILE`. + pub fn random_file>(&mut self, p: P) -> Result<(), Error> { + self.setopt_path(curl_sys::CURLOPT_RANDOM_FILE, p.as_ref()) + } + + /// Specify EGD socket path. 
+ /// + /// Indicates the path name to the Entropy Gathering Daemon socket. It will + /// be used to seed the random engine for SSL. + /// + /// By default this option is not set and corresponds to + /// `CURLOPT_EGDSOCKET`. + pub fn egd_socket>(&mut self, p: P) -> Result<(), Error> { + self.setopt_path(curl_sys::CURLOPT_EGDSOCKET, p.as_ref()) + } + + /// Specify ciphers to use for TLS. + /// + /// Holds the list of ciphers to use for the SSL connection. The list must + /// be syntactically correct, it consists of one or more cipher strings + /// separated by colons. Commas or spaces are also acceptable separators + /// but colons are normally used, !, - and + can be used as operators. + /// + /// For OpenSSL and GnuTLS valid examples of cipher lists include 'RC4-SHA', + /// ´SHA1+DES´, 'TLSv1' and 'DEFAULT'. The default list is normally set when + /// you compile OpenSSL. + /// + /// You'll find more details about cipher lists on this URL: + /// + /// https://www.openssl.org/docs/apps/ciphers.html + /// + /// For NSS, valid examples of cipher lists include 'rsa_rc4_128_md5', + /// ´rsa_aes_128_sha´, etc. With NSS you don't add/remove ciphers. If one + /// uses this option then all known ciphers are disabled and only those + /// passed in are enabled. + /// + /// You'll find more details about the NSS cipher lists on this URL: + /// + /// http://git.fedorahosted.org/cgit/mod_nss.git/plain/docs/mod_nss.html#Directives + /// + /// By default this option is not set and corresponds to + /// `CURLOPT_SSL_CIPHER_LIST`. + pub fn ssl_cipher_list(&mut self, ciphers: &str) -> Result<(), Error> { + let ciphers = try!(CString::new(ciphers)); + self.setopt_str(curl_sys::CURLOPT_SSL_CIPHER_LIST, &ciphers) + } + + /// Enable or disable use of the SSL session-ID cache + /// + /// By default all transfers are done using the cache enabled. 
While nothing + /// ever should get hurt by attempting to reuse SSL session-IDs, there seem + /// to be or have been broken SSL implementations in the wild that may + /// require you to disable this in order for you to succeed. + /// + /// This corresponds to the `CURLOPT_SSL_SESSIONID_CACHE` option. + pub fn ssl_sessionid_cache(&mut self, enable: bool) -> Result<(), Error> { + self.setopt_long(curl_sys::CURLOPT_SSL_SESSIONID_CACHE, + enable as c_long) + } + + /// Set SSL behavior options + /// + /// Inform libcurl about SSL specific behaviors. + /// + /// This corresponds to the `CURLOPT_SSL_OPTIONS` option. + pub fn ssl_options(&mut self, bits: &SslOpt) -> Result<(), Error> { + self.setopt_long(curl_sys::CURLOPT_SSL_OPTIONS, bits.bits) + } + + // /// Set SSL behavior options for proxies + // /// + // /// Inform libcurl about SSL specific behaviors. + // /// + // /// This corresponds to the `CURLOPT_PROXY_SSL_OPTIONS` option. + // pub fn proxy_ssl_options(&mut self, bits: &SslOpt) -> Result<(), Error> { + // self.setopt_long(curl_sys::CURLOPT_PROXY_SSL_OPTIONS, bits.bits) + // } + + // /// Stores a private pointer-sized piece of data. + // /// + // /// This can be retrieved through the `private` function and otherwise + // /// libcurl does not tamper with this value. This corresponds to + // /// `CURLOPT_PRIVATE` and defaults to 0. + // pub fn set_private(&mut self, private: usize) -> Result<(), Error> { + // self.setopt_ptr(curl_sys::CURLOPT_PRIVATE, private as *const _) + // } + // + // /// Fetches this handle's private pointer-sized piece of data. + // /// + // /// This corresponds to `CURLINFO_PRIVATE` and defaults to 0. 
+ // pub fn private(&mut self) -> Result { + // self.getopt_ptr(curl_sys::CURLINFO_PRIVATE).map(|p| p as usize) + // } + + // ========================================================================= + // getters + + /// Get the last used URL + /// + /// In cases when you've asked libcurl to follow redirects, it may + /// not be the same value you set with `url`. + /// + /// This methods corresponds to the `CURLINFO_EFFECTIVE_URL` option. + /// + /// Returns `Ok(None)` if no effective url is listed or `Err` if an error + /// happens or the underlying bytes aren't valid utf-8. + pub fn effective_url(&mut self) -> Result, Error> { + self.getopt_str(curl_sys::CURLINFO_EFFECTIVE_URL) + } + + /// Get the last used URL, in bytes + /// + /// In cases when you've asked libcurl to follow redirects, it may + /// not be the same value you set with `url`. + /// + /// This methods corresponds to the `CURLINFO_EFFECTIVE_URL` option. + /// + /// Returns `Ok(None)` if no effective url is listed or `Err` if an error + /// happens or the underlying bytes aren't valid utf-8. + pub fn effective_url_bytes(&mut self) -> Result, Error> { + self.getopt_bytes(curl_sys::CURLINFO_EFFECTIVE_URL) + } + + /// Get the last response code + /// + /// The stored value will be zero if no server response code has been + /// received. Note that a proxy's CONNECT response should be read with + /// `http_connectcode` and not this. + /// + /// Corresponds to `CURLINFO_RESPONSE_CODE` and returns an error if this + /// option is not supported. + pub fn response_code(&mut self) -> Result { + self.getopt_long(curl_sys::CURLINFO_RESPONSE_CODE).map(|c| c as u32) + } + + /// Get the CONNECT response code + /// + /// Returns the last received HTTP proxy response code to a CONNECT request. + /// The returned value will be zero if no such response code was available. + /// + /// Corresponds to `CURLINFO_HTTP_CONNECTCODE` and returns an error if this + /// option is not supported. 
+ pub fn http_connectcode(&mut self) -> Result { + self.getopt_long(curl_sys::CURLINFO_HTTP_CONNECTCODE).map(|c| c as u32) + } + + /// Get the remote time of the retrieved document + /// + /// Returns the remote time of the retrieved document (in number of seconds + /// since 1 Jan 1970 in the GMT/UTC time zone). If you get `None`, it can be + /// because of many reasons (it might be unknown, the server might hide it + /// or the server doesn't support the command that tells document time etc) + /// and the time of the document is unknown. + /// + /// Note that you must tell the server to collect this information before + /// the transfer is made, by using the `filetime` method to + /// or you will unconditionally get a `None` back. + /// + /// This corresponds to `CURLINFO_FILETIME` and may return an error if the + /// option is not supported + pub fn filetime(&mut self) -> Result, Error> { + self.getopt_long(curl_sys::CURLINFO_FILETIME).map(|r| { + if r == -1 { + None + } else { + Some(r as i64) + } + }) + } + + /// Get total time of previous transfer + /// + /// Returns the total time for the previous transfer, + /// including name resolving, TCP connect etc. + /// + /// Corresponds to `CURLINFO_TOTAL_TIME` and may return an error if the + /// option isn't supported. + pub fn total_time(&mut self) -> Result { + self.getopt_double(curl_sys::CURLINFO_TOTAL_TIME) + .map(double_seconds_to_duration) + } + + /// Get the name lookup time + /// + /// Returns the total time from the start + /// until the name resolving was completed. + /// + /// Corresponds to `CURLINFO_NAMELOOKUP_TIME` and may return an error if the + /// option isn't supported. + pub fn namelookup_time(&mut self) -> Result { + self.getopt_double(curl_sys::CURLINFO_NAMELOOKUP_TIME) + .map(double_seconds_to_duration) + } + + /// Get the time until connect + /// + /// Returns the total time from the start + /// until the connection to the remote host (or proxy) was completed. 
+ /// + /// Corresponds to `CURLINFO_CONNECT_TIME` and may return an error if the + /// option isn't supported. + pub fn connect_time(&mut self) -> Result { + self.getopt_double(curl_sys::CURLINFO_CONNECT_TIME) + .map(double_seconds_to_duration) + } + + /// Get the time until the SSL/SSH handshake is completed + /// + /// Returns the total time it took from the start until the SSL/SSH + /// connect/handshake to the remote host was completed. This time is most often + /// very near to the `pretransfer_time` time, except for cases such as + /// HTTP pipelining where the pretransfer time can be delayed due to waits in + /// line for the pipeline and more. + /// + /// Corresponds to `CURLINFO_APPCONNECT_TIME` and may return an error if the + /// option isn't supported. + pub fn appconnect_time(&mut self) -> Result { + self.getopt_double(curl_sys::CURLINFO_APPCONNECT_TIME) + .map(double_seconds_to_duration) + } + + /// Get the time until the file transfer start + /// + /// Returns the total time it took from the start until the file + /// transfer is just about to begin. This includes all pre-transfer commands + /// and negotiations that are specific to the particular protocol(s) involved. + /// It does not involve the sending of the protocol- specific request that + /// triggers a transfer. + /// + /// Corresponds to `CURLINFO_PRETRANSFER_TIME` and may return an error if the + /// option isn't supported. + pub fn pretransfer_time(&mut self) -> Result { + self.getopt_double(curl_sys::CURLINFO_PRETRANSFER_TIME) + .map(double_seconds_to_duration) + } + + /// Get the time until the first byte is received + /// + /// Returns the total time it took from the start until the first + /// byte is received by libcurl. This includes `pretransfer_time` and + /// also the time the server needs to calculate the result. + /// + /// Corresponds to `CURLINFO_STARTTRANSFER_TIME` and may return an error if the + /// option isn't supported. 
+ pub fn starttransfer_time(&mut self) -> Result { + self.getopt_double(curl_sys::CURLINFO_STARTTRANSFER_TIME) + .map(double_seconds_to_duration) + } + + /// Get the time for all redirection steps + /// + /// Returns the total time it took for all redirection steps + /// include name lookup, connect, pretransfer and transfer before final + /// transaction was started. `redirect_time` contains the complete + /// execution time for multiple redirections. + /// + /// Corresponds to `CURLINFO_REDIRECT_TIME` and may return an error if the + /// option isn't supported. + pub fn redirect_time(&mut self) -> Result { + self.getopt_double(curl_sys::CURLINFO_REDIRECT_TIME) + .map(double_seconds_to_duration) + } + + /// Get the number of redirects + /// + /// Corresponds to `CURLINFO_REDIRECT_COUNT` and may return an error if the + /// option isn't supported. + pub fn redirect_count(&mut self) -> Result { + self.getopt_long(curl_sys::CURLINFO_REDIRECT_COUNT).map(|c| c as u32) + } + + /// Get the URL a redirect would go to + /// + /// Returns the URL a redirect would take you to if you would enable + /// `follow_location`. This can come very handy if you think using the + /// built-in libcurl redirect logic isn't good enough for you but you would + /// still prefer to avoid implementing all the magic of figuring out the new + /// URL. + /// + /// Corresponds to `CURLINFO_REDIRECT_URL` and may return an error if the + /// url isn't valid utf-8 or an error happens. + pub fn redirect_url(&mut self) -> Result, Error> { + self.getopt_str(curl_sys::CURLINFO_REDIRECT_URL) + } + + /// Get the URL a redirect would go to, in bytes + /// + /// Returns the URL a redirect would take you to if you would enable + /// `follow_location`. This can come very handy if you think using the + /// built-in libcurl redirect logic isn't good enough for you but you would + /// still prefer to avoid implementing all the magic of figuring out the new + /// URL. 
+ /// + /// Corresponds to `CURLINFO_REDIRECT_URL` and may return an error. + pub fn redirect_url_bytes(&mut self) -> Result, Error> { + self.getopt_bytes(curl_sys::CURLINFO_REDIRECT_URL) + } + + /// Get size of retrieved headers + /// + /// Corresponds to `CURLINFO_HEADER_SIZE` and may return an error if the + /// option isn't supported. + pub fn header_size(&mut self) -> Result { + self.getopt_long(curl_sys::CURLINFO_HEADER_SIZE).map(|c| c as u64) + } + + /// Get size of sent request. + /// + /// Corresponds to `CURLINFO_REQUEST_SIZE` and may return an error if the + /// option isn't supported. + pub fn request_size(&mut self) -> Result { + self.getopt_long(curl_sys::CURLINFO_REQUEST_SIZE).map(|c| c as u64) + } + + /// Get Content-Type + /// + /// Returns the content-type of the downloaded object. This is the value + /// read from the Content-Type: field. If you get `None`, it means that the + /// server didn't send a valid Content-Type header or that the protocol + /// used doesn't support this. + /// + /// Corresponds to `CURLINFO_CONTENT_TYPE` and may return an error if the + /// option isn't supported. + pub fn content_type(&mut self) -> Result, Error> { + self.getopt_str(curl_sys::CURLINFO_CONTENT_TYPE) + } + + /// Get Content-Type, in bytes + /// + /// Returns the content-type of the downloaded object. This is the value + /// read from the Content-Type: field. If you get `None`, it means that the + /// server didn't send a valid Content-Type header or that the protocol + /// used doesn't support this. + /// + /// Corresponds to `CURLINFO_CONTENT_TYPE` and may return an error if the + /// option isn't supported. + pub fn content_type_bytes(&mut self) -> Result, Error> { + self.getopt_bytes(curl_sys::CURLINFO_CONTENT_TYPE) + } + + /// Get errno number from last connect failure. + /// + /// Note that the value is only set on failure, it is not reset upon a + /// successful operation. The number is OS and system specific. 
+ /// + /// Corresponds to `CURLINFO_OS_ERRNO` and may return an error if the + /// option isn't supported. + pub fn os_errno(&mut self) -> Result { + self.getopt_long(curl_sys::CURLINFO_OS_ERRNO).map(|c| c as i32) + } + + /// Get IP address of last connection. + /// + /// Returns a string holding the IP address of the most recent connection + /// done with this curl handle. This string may be IPv6 when that is + /// enabled. + /// + /// Corresponds to `CURLINFO_PRIMARY_IP` and may return an error if the + /// option isn't supported. + pub fn primary_ip(&mut self) -> Result, Error> { + self.getopt_str(curl_sys::CURLINFO_PRIMARY_IP) + } + + /// Get the latest destination port number + /// + /// Corresponds to `CURLINFO_PRIMARY_PORT` and may return an error if the + /// option isn't supported. + pub fn primary_port(&mut self) -> Result { + self.getopt_long(curl_sys::CURLINFO_PRIMARY_PORT).map(|c| c as u16) + } + + /// Get local IP address of last connection + /// + /// Returns a string holding the IP address of the local end of most recent + /// connection done with this curl handle. This string may be IPv6 when that + /// is enabled. + /// + /// Corresponds to `CURLINFO_LOCAL_IP` and may return an error if the + /// option isn't supported. + pub fn local_ip(&mut self) -> Result, Error> { + self.getopt_str(curl_sys::CURLINFO_LOCAL_IP) + } + + /// Get the latest local port number + /// + /// Corresponds to `CURLINFO_LOCAL_PORT` and may return an error if the + /// option isn't supported. + pub fn local_port(&mut self) -> Result { + self.getopt_long(curl_sys::CURLINFO_LOCAL_PORT).map(|c| c as u16) + } + + /// Get all known cookies + /// + /// Returns a linked-list of all cookies cURL knows (expired ones, too). + /// + /// Corresponds to the `CURLINFO_COOKIELIST` option and may return an error + /// if the option isn't supported. 
+ pub fn cookies(&mut self) -> Result { + unsafe { + let mut list = 0 as *mut _; + let rc = curl_sys::curl_easy_getinfo(self.handle, + curl_sys::CURLINFO_COOKIELIST, + &mut list); + try!(self.cvt(rc)); + Ok(List { raw: list }) + } + } + + // ========================================================================= + // Other methods + + /// After options have been set, this will perform the transfer described by + /// the options. + /// + /// This performs the request in a synchronous fashion. This can be used + /// multiple times for one easy handle and libcurl will attempt to re-use + /// the same connection for all transfers. + /// + /// This method will preserve all options configured in this handle for the + /// next request, and if that is not desired then the options can be + /// manually reset or the `reset` method can be called. + /// + /// Note that this method takes `&self`, which is quite important! This + /// allows applications to close over the handle in various callbacks to + /// call methods like `unpause_write` and `unpause_read` while a transfer is + /// in progress. + pub fn perform(&self) -> Result<(), Error> { + unsafe { + self.reset_scoped_configuration(); + } + self.do_perform() + } + + fn do_perform(&self) -> Result<(), Error> { + if self.data.running.get() { + return Err(Error::new(curl_sys::CURLE_FAILED_INIT)) + } + + self.data.running.set(true); + let ret = unsafe { + self.cvt(curl_sys::curl_easy_perform(self.handle)) + }; + self.data.running.set(false); + panic::propagate(); + return ret + } + + /// Creates a new scoped transfer which can be used to set callbacks and + /// data which only live for the scope of the returned object. + /// + /// An `Easy` handle is often reused between different requests to cache + /// connections to servers, but often the lifetime of the data as part of + /// each transfer is unique. 
This function serves as an ability to share an + /// `Easy` across many transfers while ergonomically using possibly + /// stack-local data as part of each transfer. + /// + /// Configuration can be set on the `Easy` and then a `Transfer` can be + /// created to set scoped configuration (like callbacks). Finally, the + /// `perform` method on the `Transfer` function can be used. + /// + /// When the `Transfer` option is dropped then all configuration set on the + /// transfer itself will be reset. + pub fn transfer<'data, 'easy>(&'easy mut self) -> Transfer<'easy, 'data> { + // NB: We need to be *very* careful here about how we treat the + // callbacks set on a `Transfer`! It may be possible for that type + // to leak, and if we were to continue using the callbacks there + // there could possibly be use-after-free as they reference + // stack-local data. As a result, we attempt to be robust in the + // face of leaking a `Transfer` (one that didn't drop). + // + // What this basically amounts to is that whenever we poke libcurl that + // *might* call one of those callbacks or use some of that data we clear + // out everything that would have been set on a `Transfer` and instead + // start fresh. This call to `reset_scoped_configuration` will reset all + // callbacks based on the state in *this* handle which we know is still + // alive, so it's safe to configure. + // + // Also note that because we have to be resilient in the face of + // `Transfer` leaks anyway we just don't bother with a `Drop` impl and + // instead rely on this always running to reset any configuration. + assert!(!self.data.running.get()); + unsafe { + self.reset_scoped_configuration(); + } + Transfer { + data: Box::new(TransferData::default()), + easy: self, + } + } + + // See note above in `transfer` for what this is doing. 
+ unsafe fn reset_scoped_configuration(&self) { + let EasyData { + ref write, + ref read, + ref seek, + ref debug, + ref header, + ref progress, + ref ssl_ctx, + ref running, + debug_set, + header_list: _, + form: _, + error_buf: _, + } = *self.data; + + // Can't reset while running, we'll detect this elsewhere + if running.get() { + return + } + + let ptr = |set| { + if set { + &*self.data as *const _ as *mut c_void + } else { + 0 as *mut _ + } + }; + + let write = ptr(write.is_some()); + let read = ptr(read.is_some()); + let seek = ptr(seek.is_some()); + let debug = ptr(debug.is_some()); + let header = ptr(header.is_some()); + let progress = ptr(progress.is_some()); + let ssl_ctx = ptr(ssl_ctx.is_some()); + + let _ = self.set_write_function(easy_write_cb, write); + let _ = self.set_read_function(easy_read_cb, read); + let _ = self.set_seek_function(easy_seek_cb, seek); + let _ = self.set_header_function(easy_header_cb, header); + let _ = self.set_progress_function(easy_progress_cb, progress); + let _ = self.set_ssl_ctx_function(easy_ssl_ctx_cb, ssl_ctx); + + // Don't reset the debug callback if we haven't set it yet to preserve + // the default behavior. + if debug_set { + let _ = self.set_debug_function(easy_debug_cb, debug); + } + + // Clear out the post fields which may be referencing stale data. + // curl_sys::curl_easy_setopt(easy, + // curl_sys::CURLOPT_POSTFIELDS, + // 0 as *const i32); + } + + /// Unpause reading on a connection. + /// + /// Using this function, you can explicitly unpause a connection that was + /// previously paused. + /// + /// A connection can be paused by letting the read or the write callbacks + /// return `ReadError::Pause` or `WriteError::Pause`. + /// + /// To unpause, you may for example call this from the progress callback + /// which gets called at least once per second, even if the connection is + /// paused. + /// + /// The chance is high that you will get your write callback called before + /// this function returns. 
+ pub fn unpause_read(&self) -> Result<(), Error> { + unsafe { + let rc = curl_sys::curl_easy_pause(self.handle, + curl_sys::CURLPAUSE_RECV_CONT); + self.cvt(rc) + } + } + + /// Unpause writing on a connection. + /// + /// Using this function, you can explicitly unpause a connection that was + /// previously paused. + /// + /// A connection can be paused by letting the read or the write callbacks + /// return `ReadError::Pause` or `WriteError::Pause`. A write callback that + /// returns pause signals to the library that it couldn't take care of any + /// data at all, and that data will then be delivered again to the callback + /// when the writing is later unpaused. + /// + /// To unpause, you may for example call this from the progress callback + /// which gets called at least once per second, even if the connection is + /// paused. + pub fn unpause_write(&self) -> Result<(), Error> { + unsafe { + let rc = curl_sys::curl_easy_pause(self.handle, + curl_sys::CURLPAUSE_SEND_CONT); + self.cvt(rc) + } + } + + /// URL encodes a string `s` + pub fn url_encode(&mut self, s: &[u8]) -> String { + if s.len() == 0 { + return String::new() + } + unsafe { + let p = curl_sys::curl_easy_escape(self.handle, + s.as_ptr() as *const _, + s.len() as c_int); + assert!(!p.is_null()); + let ret = str::from_utf8(CStr::from_ptr(p).to_bytes()).unwrap(); + let ret = String::from(ret); + curl_sys::curl_free(p as *mut _); + return ret + } + } + + /// URL decodes a string `s`, returning `None` if it fails + pub fn url_decode(&mut self, s: &str) -> Vec { + if s.len() == 0 { + return Vec::new(); + } + + // Work around https://curl.haxx.se/docs/adv_20130622.html, a bug where + // if the last few characters are a bad escape then curl will have a + // buffer overrun. 
+ let mut iter = s.chars().rev(); + let orig_len = s.len(); + let mut data; + let mut s = s; + if iter.next() == Some('%') || + iter.next() == Some('%') || + iter.next() == Some('%') { + data = s.to_string(); + data.push(0u8 as char); + s = &data[..]; + } + unsafe { + let mut len = 0; + let p = curl_sys::curl_easy_unescape(self.handle, + s.as_ptr() as *const _, + orig_len as c_int, + &mut len); + assert!(!p.is_null()); + let slice = slice::from_raw_parts(p as *const u8, len as usize); + let ret = slice.to_vec(); + curl_sys::curl_free(p as *mut _); + return ret + } + } + + // TODO: I don't think this is safe, you can drop this which has all the + // callback data and then the next is use-after-free + // + // /// Attempts to clone this handle, returning a new session handle with the + // /// same options set for this handle. + // /// + // /// Internal state info and things like persistent connections ccannot be + // /// transferred. + // /// + // /// # Errors + // /// + // /// If a new handle could not be allocated or another error happens, `None` + // /// is returned. + // pub fn try_clone<'b>(&mut self) -> Option> { + // unsafe { + // let handle = curl_sys::curl_easy_duphandle(self.handle); + // if handle.is_null() { + // None + // } else { + // Some(Easy { + // handle: handle, + // data: blank_data(), + // _marker: marker::PhantomData, + // }) + // } + // } + // } + + /// Re-initializes this handle to the default values. + /// + /// This puts the handle to the same state as it was in when it was just + /// created. This does, however, keep live connections, the session id + /// cache, the dns cache, and cookies. + pub fn reset(&mut self) { + unsafe { + curl_sys::curl_easy_reset(self.handle); + } + default_configure(self); + } + + /// Receives data from a connected socket. + /// + /// Only useful after a successful `perform` with the `connect_only` option + /// set as well. 
+ pub fn recv(&mut self, data: &mut [u8]) -> Result { + unsafe { + let mut n = 0; + let r = curl_sys::curl_easy_recv(self.handle, + data.as_mut_ptr() as *mut _, + data.len(), + &mut n); + if r == curl_sys::CURLE_OK { + Ok(n) + } else { + Err(Error::new(r)) + } + } + } + + /// Sends data over the connected socket. + /// + /// Only useful after a successful `perform` with the `connect_only` option + /// set as well. + pub fn send(&mut self, data: &[u8]) -> Result { + unsafe { + let mut n = 0; + let rc = curl_sys::curl_easy_send(self.handle, + data.as_ptr() as *const _, + data.len(), + &mut n); + try!(self.cvt(rc)); + Ok(n) + } + } + + /// Get a pointer to the raw underlying CURL handle. + pub fn raw(&self) -> *mut curl_sys::CURL { + self.handle + } + + #[cfg(unix)] + fn setopt_path(&mut self, + opt: curl_sys::CURLoption, + val: &Path) -> Result<(), Error> { + use std::os::unix::prelude::*; + let s = try!(CString::new(val.as_os_str().as_bytes())); + self.setopt_str(opt, &s) + } + + #[cfg(windows)] + fn setopt_path(&mut self, + opt: curl_sys::CURLoption, + val: &Path) -> Result<(), Error> { + match val.to_str() { + Some(s) => self.setopt_str(opt, &try!(CString::new(s))), + None => Err(Error::new(curl_sys::CURLE_CONV_FAILED)), + } + } + + fn setopt_long(&mut self, + opt: curl_sys::CURLoption, + val: c_long) -> Result<(), Error> { + unsafe { + self.cvt(curl_sys::curl_easy_setopt(self.handle, opt, val)) + } + } + + fn setopt_str(&mut self, + opt: curl_sys::CURLoption, + val: &CStr) -> Result<(), Error> { + self.setopt_ptr(opt, val.as_ptr()) + } + + fn setopt_ptr(&self, + opt: curl_sys::CURLoption, + val: *const c_char) -> Result<(), Error> { + unsafe { + self.cvt(curl_sys::curl_easy_setopt(self.handle, opt, val)) + } + } + + fn setopt_off_t(&mut self, + opt: curl_sys::CURLoption, + val: curl_sys::curl_off_t) -> Result<(), Error> { + unsafe { + let rc = curl_sys::curl_easy_setopt(self.handle, opt, val); + self.cvt(rc) + } + } + + fn getopt_bytes(&mut self, opt: 
curl_sys::CURLINFO) + -> Result, Error> { + unsafe { + let p = try!(self.getopt_ptr(opt)); + if p.is_null() { + Ok(None) + } else { + Ok(Some(CStr::from_ptr(p).to_bytes())) + } + } + } + + fn getopt_ptr(&mut self, opt: curl_sys::CURLINFO) + -> Result<*const c_char, Error> { + unsafe { + let mut p = 0 as *const c_char; + let rc = curl_sys::curl_easy_getinfo(self.handle, opt, &mut p); + try!(self.cvt(rc)); + Ok(p) + } + } + + fn getopt_str(&mut self, opt: curl_sys::CURLINFO) + -> Result, Error> { + match self.getopt_bytes(opt) { + Ok(None) => Ok(None), + Err(e) => Err(e), + Ok(Some(bytes)) => { + match str::from_utf8(bytes) { + Ok(s) => Ok(Some(s)), + Err(_) => Err(Error::new(curl_sys::CURLE_CONV_FAILED)), + } + } + } + } + + fn getopt_long(&mut self, opt: curl_sys::CURLINFO) -> Result { + unsafe { + let mut p = 0; + let rc = curl_sys::curl_easy_getinfo(self.handle, opt, &mut p); + try!(self.cvt(rc)); + Ok(p) + } + } + + fn getopt_double(&mut self, opt: curl_sys::CURLINFO) -> Result { + unsafe { + let mut p = 0 as c_double; + let rc = curl_sys::curl_easy_getinfo(self.handle, opt, &mut p); + try!(self.cvt(rc)); + Ok(p) + } + } + + fn cvt(&self, rc: curl_sys::CURLcode) -> Result<(), Error> { + if rc == curl_sys::CURLE_OK { + return Ok(()) + } + let mut buf = self.data.error_buf.borrow_mut(); + if buf[0] == 0 { + return Err(Error::new(rc)) + } + let pos = buf.iter().position(|i| *i == 0).unwrap_or(buf.len()); + let msg = String::from_utf8_lossy(&buf[..pos]).into_owned(); + buf[0] = 0; + Err(::error::error_with_extra(rc, msg.into_boxed_str())) + } +} + +extern fn easy_write_cb(ptr: *mut c_char, + size: size_t, + nmemb: size_t, + data: *mut c_void) -> size_t { + write_cb(ptr, size, nmemb, data, |buf| unsafe { + (*(data as *mut EasyData)).write.as_mut().map(|f| f(buf)) + }) +} + +extern fn transfer_write_cb(ptr: *mut c_char, + size: size_t, + nmemb: size_t, + data: *mut c_void) -> size_t { + write_cb(ptr, size, nmemb, data, |buf| unsafe { + (*(data as *mut 
TransferData)).write.as_mut().map(|f| f(buf)) + }) +} + +fn write_cb(ptr: *mut c_char, + size: size_t, + nmemb: size_t, + data: *mut c_void, + f: F) + -> size_t + where F: FnOnce(&[u8]) -> Option> +{ + if data.is_null() { + return size * nmemb + } + panic::catch(|| unsafe { + let input = slice::from_raw_parts(ptr as *const u8, + size * nmemb); + match f(input) { + Some(Ok(s)) => s, + Some(Err(WriteError::Pause)) | + Some(Err(WriteError::__Nonexhaustive)) => { + curl_sys::CURL_WRITEFUNC_PAUSE + } + None => !0, + } + }).unwrap_or(!0) +} + +extern fn easy_read_cb(ptr: *mut c_char, + size: size_t, + nmemb: size_t, + data: *mut c_void) -> size_t { + read_cb(ptr, size, nmemb, data, |buf| unsafe { + (*(data as *mut EasyData)).read.as_mut().map(|f| f(buf)) + }) +} + +extern fn transfer_read_cb(ptr: *mut c_char, + size: size_t, + nmemb: size_t, + data: *mut c_void) -> size_t { + read_cb(ptr, size, nmemb, data, |buf| unsafe { + (*(data as *mut TransferData)).read.as_mut().map(|f| f(buf)) + }) +} + +fn read_cb(ptr: *mut c_char, + size: size_t, + nmemb: size_t, + data: *mut c_void, + f: F) -> size_t + where F: FnOnce(&mut [u8]) -> Option> +{ + unsafe { + if data.is_null() { + return 0 + } + let input = slice::from_raw_parts_mut(ptr as *mut u8, + size * nmemb); + panic::catch(|| { + match f(input) { + Some(Ok(s)) => s, + Some(Err(ReadError::Pause)) => { + curl_sys::CURL_READFUNC_PAUSE + } + Some(Err(ReadError::__Nonexhaustive)) | + Some(Err(ReadError::Abort)) => { + curl_sys::CURL_READFUNC_ABORT + } + None => !0, + } + }).unwrap_or(!0) + } +} + +extern fn easy_seek_cb(data: *mut c_void, + offset: curl_sys::curl_off_t, + origin: c_int) -> c_int { + seek_cb(data, offset, origin, |s| unsafe { + (*(data as *mut EasyData)).seek.as_mut().map(|f| f(s)) + }) +} + +extern fn transfer_seek_cb(data: *mut c_void, + offset: curl_sys::curl_off_t, + origin: c_int) -> c_int { + seek_cb(data, offset, origin, |s| unsafe { + (*(data as *mut TransferData)).seek.as_mut().map(|f| f(s)) + }) +} + +fn 
seek_cb(data: *mut c_void, + offset: curl_sys::curl_off_t, + origin: c_int, + f: F) -> c_int + where F: FnOnce(SeekFrom) -> Option +{ + if data.is_null() { + return -1 + } + panic::catch(|| { + let from = if origin == libc::SEEK_SET { + SeekFrom::Start(offset as u64) + } else { + panic!("unknown origin from libcurl: {}", origin); + }; + match f(from) { + Some(to) => to as c_int, + None => -1, + } + }).unwrap_or(!0) +} + +extern fn easy_progress_cb(data: *mut c_void, + dltotal: c_double, + dlnow: c_double, + ultotal: c_double, + ulnow: c_double) -> c_int { + progress_cb(data, dltotal, dlnow, ultotal, ulnow, |a, b, c, d| unsafe { + (*(data as *mut EasyData)).progress.as_mut().map(|f| f(a, b, c, d)) + }) +} + +extern fn transfer_progress_cb(data: *mut c_void, + dltotal: c_double, + dlnow: c_double, + ultotal: c_double, + ulnow: c_double) -> c_int { + progress_cb(data, dltotal, dlnow, ultotal, ulnow, |a, b, c, d| unsafe { + (*(data as *mut TransferData)).progress.as_mut().map(|f| f(a, b, c, d)) + }) +} + +fn progress_cb(data: *mut c_void, + dltotal: c_double, + dlnow: c_double, + ultotal: c_double, + ulnow: c_double, + f: F) -> c_int + where F: FnOnce(f64, f64, f64, f64) -> Option, +{ + if data.is_null() { + return 0 + } + let keep_going = panic::catch(|| { + f(dltotal, dlnow, ultotal, ulnow).unwrap_or(false) + }).unwrap_or(false); + if keep_going { + 0 + } else { + 1 + } +} + +extern fn easy_ssl_ctx_cb(handle: *mut curl_sys::CURL, + ssl_ctx: *mut c_void, + data: *mut c_void) -> curl_sys::CURLcode { + + ssl_ctx_cb(handle, ssl_ctx, data, |ssl_ctx| unsafe { + match (*(data as *mut EasyData)).ssl_ctx.as_mut() { + Some(f) => f(ssl_ctx), + // If the callback isn't set we just tell CURL to + // continue. 
+ None => Ok(()), + } + }) +} + +extern fn transfer_ssl_ctx_cb(handle: *mut curl_sys::CURL, + ssl_ctx: *mut c_void, + data: *mut c_void) -> curl_sys::CURLcode { + + ssl_ctx_cb(handle, ssl_ctx, data, |ssl_ctx| unsafe { + match (*(data as *mut TransferData)).ssl_ctx.as_mut() { + Some(f) => f(ssl_ctx), + // If the callback isn't set we just tell CURL to + // continue. + None => Ok(()), + } + }) +} + +// TODO: same thing as `debug_cb`: can we expose `handle`? +fn ssl_ctx_cb(_handle: *mut curl_sys::CURL, + ssl_ctx: *mut c_void, + data: *mut c_void, + f: F) -> curl_sys::CURLcode + where F: FnOnce(*mut c_void) -> Result<(), Error> +{ + if data.is_null() { + return curl_sys::CURLE_OK; + } + + let result = panic::catch(|| { + f(ssl_ctx) + }); + + match result { + Some(Ok(())) => curl_sys::CURLE_OK, + Some(Err(e)) => e.code(), + // Default to a generic SSL error in case of panic. This + // shouldn't really matter since the error should be + // propagated later on but better safe than sorry... + None => curl_sys::CURLE_SSL_CONNECT_ERROR, + } +} + +extern fn easy_debug_cb(handle: *mut curl_sys::CURL, + kind: curl_sys::curl_infotype, + data: *mut c_char, + size: size_t, + userptr: *mut c_void) -> c_int { + debug_cb(handle, kind, data, size, userptr, |a, b| unsafe { + (*(userptr as *mut EasyData)).debug.as_mut().map(|f| f(a, b)) + }) +} + +extern fn transfer_debug_cb(handle: *mut curl_sys::CURL, + kind: curl_sys::curl_infotype, + data: *mut c_char, + size: size_t, + userptr: *mut c_void) -> c_int { + debug_cb(handle, kind, data, size, userptr, |a, b| unsafe { + (*(userptr as *mut TransferData)).debug.as_mut().map(|f| f(a, b)) + }) +} + +// TODO: expose `handle`? is that safe? 
+fn debug_cb(_handle: *mut curl_sys::CURL, + kind: curl_sys::curl_infotype, + data: *mut c_char, + size: size_t, + userptr: *mut c_void, + f: F) -> c_int + where F: FnOnce(InfoType, &[u8]) -> Option<()> +{ + if userptr.is_null() { + return 0 + } + panic::catch(|| unsafe { + let data = slice::from_raw_parts(data as *const u8, size); + let kind = match kind { + curl_sys::CURLINFO_TEXT => InfoType::Text, + curl_sys::CURLINFO_HEADER_IN => InfoType::HeaderIn, + curl_sys::CURLINFO_HEADER_OUT => InfoType::HeaderOut, + curl_sys::CURLINFO_DATA_IN => InfoType::DataIn, + curl_sys::CURLINFO_DATA_OUT => InfoType::DataOut, + curl_sys::CURLINFO_SSL_DATA_IN => InfoType::SslDataIn, + curl_sys::CURLINFO_SSL_DATA_OUT => InfoType::SslDataOut, + _ => return, + }; + f(kind, data); + }); + return 0 +} + +extern fn easy_header_cb(buffer: *mut c_char, + size: size_t, + nitems: size_t, + userptr: *mut c_void) -> size_t { + header_cb(buffer, size, nitems, userptr, |buf| unsafe { + (*(userptr as *mut EasyData)).header.as_mut().map(|f| f(buf)) + }) +} + +extern fn transfer_header_cb(buffer: *mut c_char, + size: size_t, + nitems: size_t, + userptr: *mut c_void) -> size_t { + header_cb(buffer, size, nitems, userptr, |buf| unsafe { + (*(userptr as *mut TransferData)).header.as_mut().map(|f| f(buf)) + }) +} + +fn header_cb(buffer: *mut c_char, + size: size_t, + nitems: size_t, + userptr: *mut c_void, + f: F) -> size_t + where F: FnOnce(&[u8]) -> Option, +{ + if userptr.is_null() { + return size * nitems + } + let keep_going = panic::catch(|| unsafe { + let data = slice::from_raw_parts(buffer as *const u8, + size * nitems); + f(data).unwrap_or(false) + }).unwrap_or(false); + if keep_going { + size * nitems + } else { + !0 + } +} + +impl<'easy, 'data> Transfer<'easy, 'data> { + /// Same as `Easy::write_function`, just takes a non `'static` lifetime + /// corresponding to the lifetime of this transfer. 
+ pub fn write_function(&mut self, f: F) -> Result<(), Error> + where F: FnMut(&[u8]) -> Result + 'data + { + self.data.write = Some(Box::new(f)); + unsafe { + self.easy.set_write_function(transfer_write_cb, + &*self.data as *const _ as *mut _) + } + } + + /// Same as `Easy::read_function`, just takes a non `'static` lifetime + /// corresponding to the lifetime of this transfer. + pub fn read_function(&mut self, f: F) -> Result<(), Error> + where F: FnMut(&mut [u8]) -> Result + 'data + { + self.data.read = Some(Box::new(f)); + unsafe { + self.easy.set_read_function(transfer_read_cb, + &*self.data as *const _ as *mut _) + } + } + + /// Same as `Easy::seek_function`, just takes a non `'static` lifetime + /// corresponding to the lifetime of this transfer. + pub fn seek_function(&mut self, f: F) -> Result<(), Error> + where F: FnMut(SeekFrom) -> SeekResult + 'data + { + self.data.seek = Some(Box::new(f)); + unsafe { + self.easy.set_seek_function(transfer_seek_cb, + &*self.data as *const _ as *mut _) + } + } + + /// Same as `Easy::progress_function`, just takes a non `'static` lifetime + /// corresponding to the lifetime of this transfer. + pub fn progress_function(&mut self, f: F) -> Result<(), Error> + where F: FnMut(f64, f64, f64, f64) -> bool + 'data + { + self.data.progress = Some(Box::new(f)); + unsafe { + self.easy.set_progress_function(transfer_progress_cb, + &*self.data as *const _ as *mut _) + } + } + + /// Same as `Easy::ssl_ctx_function`, just takes a non `'static` + /// lifetime corresponding to the lifetime of this transfer. + pub fn ssl_ctx_function(&mut self, f: F) -> Result<(), Error> + where F: FnMut(*mut c_void) -> Result<(), Error> + Send + 'data + { + self.data.ssl_ctx = Some(Box::new(f)); + unsafe { + self.easy.set_ssl_ctx_function(transfer_ssl_ctx_cb, + &*self.data as *const _ as *mut _) + } + } + + /// Same as `Easy::debug_function`, just takes a non `'static` lifetime + /// corresponding to the lifetime of this transfer. 
+ pub fn debug_function(&mut self, f: F) -> Result<(), Error> + where F: FnMut(InfoType, &[u8]) + 'data + { + self.data.debug = Some(Box::new(f)); + self.easy.data.debug_set = true; + unsafe { + self.easy.set_debug_function(transfer_debug_cb, + &*self.data as *const _ as *mut _) + } + } + + /// Same as `Easy::header_function`, just takes a non `'static` lifetime + /// corresponding to the lifetime of this transfer. + pub fn header_function(&mut self, f: F) -> Result<(), Error> + where F: FnMut(&[u8]) -> bool + 'data + { + self.data.header = Some(Box::new(f)); + unsafe { + self.easy.set_header_function(transfer_header_cb, + &*self.data as *const _ as *mut _) + } + } + + // TODO: need to figure out how to expose this, but it also needs to be + // reset as part of `reset_scoped_configuration` above. Unfortunately + // setting `CURLOPT_POSTFIELDS` to null will switch the request to + // POST, which is not what we want. + // + // /// Configures the data that will be uploaded as part of a POST. + // /// + // /// By default this option is not set and corresponds to + // /// `CURLOPT_POSTFIELDS`. + // pub fn post_fields(&mut self, data: &'data [u8]) -> Result<(), Error> { + // // Set the length before the pointer so libcurl knows how much to read + // try!(self.easy.post_field_size(data.len() as u64)); + // self.easy.setopt_ptr(curl_sys::CURLOPT_POSTFIELDS, + // data.as_ptr() as *const _) + // } + + /// Same as `Easy::transfer`. + pub fn perform(&self) -> Result<(), Error> { + self.easy.do_perform() + } + + /// Same as `Easy::unpause_read`. 
+ pub fn unpause_read(&self) -> Result<(), Error> { + self.easy.unpause_read() + } + + /// Same as `Easy::unpause_write` + pub fn unpause_write(&self) -> Result<(), Error> { + self.easy.unpause_write() + } +} + +fn double_seconds_to_duration(seconds: f64) -> Duration { + let whole_seconds = seconds.trunc() as u64; + let nanos = seconds.fract() * 1_000_000_000f64; + Duration::new(whole_seconds, nanos as u32) +} + +#[test] +fn double_seconds_to_duration_whole_second() { + let dur = double_seconds_to_duration(1.0); + assert_eq!(dur.as_secs(), 1); + assert_eq!(dur.subsec_nanos(), 0); +} + +#[test] +fn double_seconds_to_duration_sub_second1() { + let dur = double_seconds_to_duration(0.0); + assert_eq!(dur.as_secs(), 0); + assert_eq!(dur.subsec_nanos(), 0); +} + +#[test] +fn double_seconds_to_duration_sub_second2() { + let dur = double_seconds_to_duration(0.5); + assert_eq!(dur.as_secs(), 0); + assert_eq!(dur.subsec_nanos(), 500_000_000); +} + +fn default_configure(handle: &mut Easy) { + handle.data.error_buf = RefCell::new(vec![0; curl_sys::CURL_ERROR_SIZE]); + handle.setopt_ptr(curl_sys::CURLOPT_ERRORBUFFER, + handle.data.error_buf.borrow().as_ptr() as *const _) + .expect("failed to set error buffer"); + let _ = handle.signal(false); + ssl_configure(handle); +} + +#[cfg(all(unix, not(target_os = "macos")))] +fn ssl_configure(handle: &mut Easy) { + let probe = ::openssl_probe::probe(); + if let Some(ref path) = probe.cert_file { + let _ = handle.cainfo(path); + } + if let Some(ref path) = probe.cert_dir { + let _ = handle.capath(path); + } +} + +#[cfg(not(all(unix, not(target_os = "macos"))))] +fn ssl_configure(_handle: &mut Easy) {} + +impl Drop for Easy { + fn drop(&mut self) { + unsafe { + curl_sys::curl_easy_cleanup(self.handle); + } + } +} + +impl List { + /// Creates a new empty list of strings. + pub fn new() -> List { + List { raw: 0 as *mut _ } + } + + /// Appends some data into this list. 
+ pub fn append(&mut self, data: &str) -> Result<(), Error> { + let data = try!(CString::new(data)); + unsafe { + let raw = curl_sys::curl_slist_append(self.raw, data.as_ptr()); + assert!(!raw.is_null()); + self.raw = raw; + Ok(()) + } + } + + /// Returns an iterator over the nodes in this list. + pub fn iter(&self) -> Iter { + Iter { _me: self, cur: self.raw } + } +} + +impl Drop for List { + fn drop(&mut self) { + unsafe { + curl_sys::curl_slist_free_all(self.raw) + } + } +} + +impl<'a> Iterator for Iter<'a> { + type Item = &'a [u8]; + + fn next(&mut self) -> Option<&'a [u8]> { + if self.cur.is_null() { + return None + } + + unsafe { + let ret = Some(CStr::from_ptr((*self.cur).data).to_bytes()); + self.cur = (*self.cur).next; + return ret + } + } +} + +impl Form { + /// Creates a new blank form ready for the addition of new data. + pub fn new() -> Form { + Form { + head: 0 as *mut _, + tail: 0 as *mut _, + headers: Vec::new(), + buffers: Vec::new(), + strings: Vec::new(), + } + } + + /// Prepares adding a new part to this `Form` + /// + /// Note that the part is not actually added to the form until the `add` + /// method is called on `Part`, which may or may not fail. + pub fn part<'a, 'data>(&'a mut self, name: &'data str) -> Part<'a, 'data> { + Part { + error: None, + form: self, + name: name, + array: vec![curl_sys::curl_forms { + option: curl_sys::CURLFORM_END, + value: 0 as *mut _, + }], + } + } +} + +impl Drop for Form { + fn drop(&mut self) { + unsafe { + curl_sys::curl_formfree(self.head); + } + } +} + +impl<'form, 'data> Part<'form, 'data> { + /// A pointer to the contents of this part, the actual data to send away. 
+ pub fn contents(&mut self, contents: &'data [u8]) -> &mut Self { + let pos = self.array.len() - 1; + self.array.insert(pos, curl_sys::curl_forms { + option: curl_sys::CURLFORM_COPYCONTENTS, + value: contents.as_ptr() as *mut _, + }); + self.array.insert(pos + 1, curl_sys::curl_forms { + option: curl_sys::CURLFORM_CONTENTSLENGTH, + value: contents.len() as *mut _, + }); + self + } + + /// Causes this file to be read and its contents used as data in this part + /// + /// This part does not automatically become a file upload part simply + /// because its data was read from a file. + /// + /// # Errors + /// + /// If the filename has any internal nul bytes or if on Windows it does not + /// contain a unicode filename then the `add` function will eventually + /// return an error. + pub fn file_content

(&mut self, file: P) -> &mut Self + where P: AsRef + { + self._file_content(file.as_ref()) + } + + fn _file_content(&mut self, file: &Path) -> &mut Self { + if let Some(bytes) = self.path2cstr(file) { + let pos = self.array.len() - 1; + self.array.insert(pos, curl_sys::curl_forms { + option: curl_sys::CURLFORM_FILECONTENT, + value: bytes.as_ptr() as *mut _, + }); + self.form.strings.push(bytes); + } + self + } + + /// Makes this part a file upload part of the given file. + /// + /// Sets the filename field to the basename of the provided file name, and + /// it reads the contents of the file and passes them as data and sets the + /// content type if the given file matches one of the internally known file + /// extensions. + /// + /// The given upload file must exist entirely on the filesystem before the + /// upload is started because libcurl needs to read the size of it + /// beforehand. + /// + /// Multiple files can be uploaded by calling this method multiple times and + /// content types can also be configured for each file (by calling that + /// next). + /// + /// # Errors + /// + /// If the filename has any internal nul bytes or if on Windows it does not + /// contain a unicode filename then this function will cause `add` to return + /// an error when called. + pub fn file(&mut self, file: &'data P) -> &mut Self + where P: AsRef + { + self._file(file.as_ref()) + } + + fn _file(&mut self, file: &'data Path) -> &mut Self { + if let Some(bytes) = self.path2cstr(file) { + let pos = self.array.len() - 1; + self.array.insert(pos, curl_sys::curl_forms { + option: curl_sys::CURLFORM_FILE, + value: bytes.as_ptr() as *mut _, + }); + self.form.strings.push(bytes); + } + self + } + + /// Used in combination with `Part::file`, provides the content-type for + /// this part, possibly instead of choosing an internal one. + /// + /// # Panics + /// + /// This function will panic if `content_type` contains an internal nul + /// byte. 
+ pub fn content_type(&mut self, content_type: &'data str) -> &mut Self { + if let Some(bytes) = self.bytes2cstr(content_type.as_bytes()) { + let pos = self.array.len() - 1; + self.array.insert(pos, curl_sys::curl_forms { + option: curl_sys::CURLFORM_CONTENTTYPE, + value: bytes.as_ptr() as *mut _, + }); + self.form.strings.push(bytes); + } + self + } + + /// Used in combination with `Part::file`, provides the filename for + /// this part instead of the actual one. + /// + /// # Errors + /// + /// If `name` contains an internal nul byte, or if on Windows the path is + /// not valid unicode then this function will return an error when `add` is + /// called. + pub fn filename(&mut self, name: &'data P) -> &mut Self + where P: AsRef + { + self._filename(name.as_ref()) + } + + fn _filename(&mut self, name: &'data Path) -> &mut Self { + if let Some(bytes) = self.path2cstr(name) { + let pos = self.array.len() - 1; + self.array.insert(pos, curl_sys::curl_forms { + option: curl_sys::CURLFORM_FILENAME, + value: bytes.as_ptr() as *mut _, + }); + self.form.strings.push(bytes); + } + self + } + + /// This is used to provide a custom file upload part without using the + /// `file` method above. + /// + /// The first parameter is for the filename field and the second is the + /// in-memory contents. + /// + /// # Errors + /// + /// If `name` contains an internal nul byte, or if on Windows the path is + /// not valid unicode then this function will return an error when `add` is + /// called. 
+ pub fn buffer(&mut self, name: &'data P, data: Vec) + -> &mut Self + where P: AsRef + { + self._buffer(name.as_ref(), data) + } + + fn _buffer(&mut self, name: &'data Path, data: Vec) -> &mut Self { + if let Some(bytes) = self.path2cstr(name) { + let pos = self.array.len() - 1; + self.array.insert(pos, curl_sys::curl_forms { + option: curl_sys::CURLFORM_BUFFER, + value: bytes.as_ptr() as *mut _, + }); + self.form.strings.push(bytes); + self.array.insert(pos + 1, curl_sys::curl_forms { + option: curl_sys::CURLFORM_BUFFERPTR, + value: data.as_ptr() as *mut _, + }); + self.array.insert(pos + 2, curl_sys::curl_forms { + option: curl_sys::CURLFORM_BUFFERLENGTH, + value: data.len() as *mut _, + }); + self.form.buffers.push(data); + } + self + } + + /// Specifies extra headers for the form POST section. + /// + /// Appends the list of headers to those libcurl automatically generates. + pub fn content_header(&mut self, headers: List) -> &mut Self { + let pos = self.array.len() - 1; + self.array.insert(pos, curl_sys::curl_forms { + option: curl_sys::CURLFORM_CONTENTHEADER, + value: headers.raw as *mut _, + }); + self.form.headers.push(headers); + self + } + + /// Attempts to add this part to the `Form` that it was created from. + /// + /// If any error happens while adding, that error is returned, otherwise + /// `Ok(())` is returned. 
+ pub fn add(&mut self) -> Result<(), FormError> { + if let Some(err) = self.error.clone() { + return Err(err) + } + let rc = unsafe { + curl_sys::curl_formadd(&mut self.form.head, + &mut self.form.tail, + curl_sys::CURLFORM_COPYNAME, + self.name.as_ptr(), + curl_sys::CURLFORM_NAMELENGTH, + self.name.len(), + curl_sys::CURLFORM_ARRAY, + self.array.as_ptr(), + curl_sys::CURLFORM_END) + }; + if rc == curl_sys::CURL_FORMADD_OK { + Ok(()) + } else { + Err(FormError::new(rc)) + } + } + + #[cfg(unix)] + fn path2cstr(&mut self, p: &Path) -> Option { + use std::os::unix::prelude::*; + self.bytes2cstr(p.as_os_str().as_bytes()) + } + + #[cfg(windows)] + fn path2cstr(&mut self, p: &Path) -> Option { + match p.to_str() { + Some(bytes) => self.bytes2cstr(bytes.as_bytes()), + None if self.error.is_none() => { + // TODO: better error code + self.error = Some(FormError::new(curl_sys::CURL_FORMADD_INCOMPLETE)); + None + } + None => None, + } + } + + fn bytes2cstr(&mut self, bytes: &[u8]) -> Option { + match CString::new(bytes) { + Ok(c) => Some(c), + Err(..) if self.error.is_none() => { + // TODO: better error code + self.error = Some(FormError::new(curl_sys::CURL_FORMADD_INCOMPLETE)); + None + } + Err(..) => None, + } + } +} + +impl Auth { + /// Creates a new set of authentications with no members. + /// + /// An `Auth` structure is used to configure which forms of authentication + /// are attempted when negotiating connections with servers. + pub fn new() -> Auth { + Auth { bits: 0 } + } + + /// HTTP Basic authentication. + /// + /// This is the default choice, and the only method that is in wide-spread + /// use and supported virtually everywhere. This sends the user name and + /// password over the network in plain text, easily captured by others. + pub fn basic(&mut self, on: bool) -> &mut Auth { + self.flag(curl_sys::CURLAUTH_BASIC, on) + } + + /// HTTP Digest authentication. 
+ /// + /// Digest authentication is defined in RFC 2617 and is a more secure way to + /// do authentication over public networks than the regular old-fashioned + /// Basic method. + pub fn digest(&mut self, on: bool) -> &mut Auth { + self.flag(curl_sys::CURLAUTH_DIGEST, on) + } + + /// HTTP Digest authentication with an IE flavor. + /// + /// Digest authentication is defined in RFC 2617 and is a more secure way to + /// do authentication over public networks than the regular old-fashioned + /// Basic method. The IE flavor is simply that libcurl will use a special + /// "quirk" that IE is known to have used before version 7 and that some + /// servers require the client to use. + pub fn digest_ie(&mut self, on: bool) -> &mut Auth { + self.flag(curl_sys::CURLAUTH_DIGEST_IE, on) + } + + /// HTTP Negotiate (SPNEGO) authentication. + /// + /// Negotiate authentication is defined in RFC 4559 and is the most secure + /// way to perform authentication over HTTP. + /// + /// You need to build libcurl with a suitable GSS-API library or SSPI on + /// Windows for this to work. + pub fn gssnegotiate(&mut self, on: bool) -> &mut Auth { + self.flag(curl_sys::CURLAUTH_GSSNEGOTIATE, on) + } + + /// HTTP NTLM authentication. + /// + /// A proprietary protocol invented and used by Microsoft. It uses a + /// challenge-response and hash concept similar to Digest, to prevent the + /// password from being eavesdropped. + /// + /// You need to build libcurl with either OpenSSL, GnuTLS or NSS support for + /// this option to work, or build libcurl on Windows with SSPI support. + pub fn ntlm(&mut self, on: bool) -> &mut Auth { + self.flag(curl_sys::CURLAUTH_NTLM, on) + } + + /// NTLM delegating to winbind helper. + /// + /// Authentication is performed by a separate binary application that is + /// executed when needed. 
The name of the application is specified at + /// compile time but is typically /usr/bin/ntlm_auth + /// + /// Note that libcurl will fork when necessary to run the winbind + /// application and kill it when complete, calling waitpid() to await its + /// exit when done. On POSIX operating systems, killing the process will + /// cause a SIGCHLD signal to be raised (regardless of whether + /// CURLOPT_NOSIGNAL is set), which must be handled intelligently by the + /// application. In particular, the application must not unconditionally + /// call wait() in its SIGCHLD signal handler to avoid being subject to a + /// race condition. This behavior is subject to change in future versions of + /// libcurl. + /// + /// A proprietary protocol invented and used by Microsoft. It uses a + /// challenge-response and hash concept similar to Digest, to prevent the + /// password from being eavesdropped. + pub fn ntlm_wb(&mut self, on: bool) -> &mut Auth { + self.flag(curl_sys::CURLAUTH_NTLM_WB, on) + } + + fn flag(&mut self, bit: c_ulong, on: bool) -> &mut Auth { + if on { + self.bits |= bit as c_long; + } else { + self.bits &= !bit as c_long; + } + self + } +} + +impl SslOpt { + /// Creates a new set of SSL options. + pub fn new() -> SslOpt { + SslOpt { bits: 0 } + } + + /// Tells libcurl to disable certificate revocation checks for those SSL + /// backends where such behavior is present. + /// + /// Currently this option is only supported for WinSSL (the native Windows + /// SSL library), with an exception in the case of Windows' Untrusted + /// Publishers blacklist which it seems can't be bypassed. This option may + /// have broader support to accommodate other SSL backends in the future. + /// https://curl.haxx.se/docs/ssl-compared.html + pub fn no_revoke(&mut self, on: bool) -> &mut SslOpt { + self.flag(curl_sys::CURLSSLOPT_NO_REVOKE, on) + } + + /// Tells libcurl to not attempt to use any workarounds for a security flaw + /// in the SSL3 and TLS1.0 protocols. 
+ /// + /// If this option isn't used or this bit is set to 0, the SSL layer libcurl + /// uses may use a work-around for this flaw although it might cause + /// interoperability problems with some (older) SSL implementations. + /// + /// > WARNING: avoiding this work-around lessens the security, and by + /// > setting this option to 1 you ask for exactly that. This option is only + /// > supported for DarwinSSL, NSS and OpenSSL. + pub fn allow_beast(&mut self, on: bool) -> &mut SslOpt { + self.flag(curl_sys::CURLSSLOPT_ALLOW_BEAST, on) + } + + fn flag(&mut self, bit: c_long, on: bool) -> &mut SslOpt { + if on { + self.bits |= bit as c_long; + } else { + self.bits &= !bit as c_long; + } + self + } +} diff -Nru cargo-0.17.0/vendor/curl-0.4.6/src/error.rs cargo-0.19.0/vendor/curl-0.4.6/src/error.rs --- cargo-0.17.0/vendor/curl-0.4.6/src/error.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/curl-0.4.6/src/error.rs 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,598 @@ +use std::error; +use std::ffi::{self, CStr}; +use std::fmt; +use std::str; +use std::io; + +use curl_sys; + +/// An error returned from various "easy" operations. +/// +/// This structure wraps a `CURLcode`. +#[derive(Clone, PartialEq)] +pub struct Error { + code: curl_sys::CURLcode, + extra: Option>, +} + +pub fn error_with_extra(code: curl_sys::CURLcode, extra: Box) -> Error { + Error { + code: code, + extra: Some(extra), + } +} + +impl Error { + /// Creates a new error from the underlying code returned by libcurl. + pub fn new(code: curl_sys::CURLcode) -> Error { + Error { + code: code, + extra: None, + } + } + + /// Returns whether this error corresponds to CURLE_UNSUPPORTED_PROTOCOL. + pub fn is_unsupported_protocol(&self) -> bool { + self.code == curl_sys::CURLE_UNSUPPORTED_PROTOCOL + } + + /// Returns whether this error corresponds to CURLE_FAILED_INIT. 
+ pub fn is_failed_init(&self) -> bool { + self.code == curl_sys::CURLE_FAILED_INIT + } + + /// Returns whether this error corresponds to CURLE_URL_MALFORMAT. + pub fn is_url_malformed(&self) -> bool { + self.code == curl_sys::CURLE_URL_MALFORMAT + } + + // /// Returns whether this error corresponds to CURLE_NOT_BUILT_IN. + // pub fn is_not_built_in(&self) -> bool { + // self.code == curl_sys::CURLE_NOT_BUILT_IN + // } + + /// Returns whether this error corresponds to CURLE_COULDNT_RESOLVE_PROXY. + pub fn is_couldnt_resolve_proxy(&self) -> bool { + self.code == curl_sys::CURLE_COULDNT_RESOLVE_PROXY + } + + /// Returns whether this error corresponds to CURLE_COULDNT_RESOLVE_HOST. + pub fn is_couldnt_resolve_host(&self) -> bool { + self.code == curl_sys::CURLE_COULDNT_RESOLVE_HOST + } + + /// Returns whether this error corresponds to CURLE_COULDNT_CONNECT. + pub fn is_couldnt_connect(&self) -> bool { + self.code == curl_sys::CURLE_COULDNT_CONNECT + } + + /// Returns whether this error corresponds to CURLE_REMOTE_ACCESS_DENIED. + pub fn is_remote_access_denied(&self) -> bool { + self.code == curl_sys::CURLE_REMOTE_ACCESS_DENIED + } + + /// Returns whether this error corresponds to CURLE_PARTIAL_FILE. + pub fn is_partial_file(&self) -> bool { + self.code == curl_sys::CURLE_PARTIAL_FILE + } + + /// Returns whether this error corresponds to CURLE_QUOTE_ERROR. + pub fn is_quote_error(&self) -> bool { + self.code == curl_sys::CURLE_QUOTE_ERROR + } + + /// Returns whether this error corresponds to CURLE_HTTP_RETURNED_ERROR. + pub fn is_http_returned_error(&self) -> bool { + self.code == curl_sys::CURLE_HTTP_RETURNED_ERROR + } + + /// Returns whether this error corresponds to CURLE_READ_ERROR. + pub fn is_read_error(&self) -> bool { + self.code == curl_sys::CURLE_READ_ERROR + } + + /// Returns whether this error corresponds to CURLE_WRITE_ERROR. 
+ pub fn is_write_error(&self) -> bool { + self.code == curl_sys::CURLE_WRITE_ERROR + } + + /// Returns whether this error corresponds to CURLE_UPLOAD_FAILED. + pub fn is_upload_failed(&self) -> bool { + self.code == curl_sys::CURLE_UPLOAD_FAILED + } + + /// Returns whether this error corresponds to CURLE_OUT_OF_MEMORY. + pub fn is_out_of_memory(&self) -> bool { + self.code == curl_sys::CURLE_OUT_OF_MEMORY + } + + /// Returns whether this error corresponds to CURLE_OPERATION_TIMEDOUT. + pub fn is_operation_timedout(&self) -> bool { + self.code == curl_sys::CURLE_OPERATION_TIMEDOUT + } + + /// Returns whether this error corresponds to CURLE_RANGE_ERROR. + pub fn is_range_error(&self) -> bool { + self.code == curl_sys::CURLE_RANGE_ERROR + } + + /// Returns whether this error corresponds to CURLE_HTTP_POST_ERROR. + pub fn is_http_post_error(&self) -> bool { + self.code == curl_sys::CURLE_HTTP_POST_ERROR + } + + /// Returns whether this error corresponds to CURLE_SSL_CONNECT_ERROR. + pub fn is_ssl_connect_error(&self) -> bool { + self.code == curl_sys::CURLE_SSL_CONNECT_ERROR + } + + /// Returns whether this error corresponds to CURLE_BAD_DOWNLOAD_RESUME. + pub fn is_bad_download_resume(&self) -> bool { + self.code == curl_sys::CURLE_BAD_DOWNLOAD_RESUME + } + + /// Returns whether this error corresponds to CURLE_FILE_COULDNT_READ_FILE. + pub fn is_file_couldnt_read_file(&self) -> bool { + self.code == curl_sys::CURLE_FILE_COULDNT_READ_FILE + } + + /// Returns whether this error corresponds to CURLE_FUNCTION_NOT_FOUND. + pub fn is_function_not_found(&self) -> bool { + self.code == curl_sys::CURLE_FUNCTION_NOT_FOUND + } + + /// Returns whether this error corresponds to CURLE_ABORTED_BY_CALLBACK. + pub fn is_aborted_by_callback(&self) -> bool { + self.code == curl_sys::CURLE_ABORTED_BY_CALLBACK + } + + /// Returns whether this error corresponds to CURLE_BAD_FUNCTION_ARGUMENT. 
+ pub fn is_bad_function_argument(&self) -> bool { + self.code == curl_sys::CURLE_BAD_FUNCTION_ARGUMENT + } + + /// Returns whether this error corresponds to CURLE_INTERFACE_FAILED. + pub fn is_interface_failed(&self) -> bool { + self.code == curl_sys::CURLE_INTERFACE_FAILED + } + + /// Returns whether this error corresponds to CURLE_TOO_MANY_REDIRECTS. + pub fn is_too_many_redirects(&self) -> bool { + self.code == curl_sys::CURLE_TOO_MANY_REDIRECTS + } + + /// Returns whether this error corresponds to CURLE_UNKNOWN_OPTION. + pub fn is_unknown_option(&self) -> bool { + self.code == curl_sys::CURLE_UNKNOWN_OPTION + } + + /// Returns whether this error corresponds to CURLE_PEER_FAILED_VERIFICATION. + pub fn is_peer_failed_verification(&self) -> bool { + self.code == curl_sys::CURLE_PEER_FAILED_VERIFICATION + } + + /// Returns whether this error corresponds to CURLE_GOT_NOTHING. + pub fn is_got_nothing(&self) -> bool { + self.code == curl_sys::CURLE_GOT_NOTHING + } + + /// Returns whether this error corresponds to CURLE_SSL_ENGINE_NOTFOUND. + pub fn is_ssl_engine_notfound(&self) -> bool { + self.code == curl_sys::CURLE_SSL_ENGINE_NOTFOUND + } + + /// Returns whether this error corresponds to CURLE_SSL_ENGINE_SETFAILED. + pub fn is_ssl_engine_setfailed(&self) -> bool { + self.code == curl_sys::CURLE_SSL_ENGINE_SETFAILED + } + + /// Returns whether this error corresponds to CURLE_SEND_ERROR. + pub fn is_send_error(&self) -> bool { + self.code == curl_sys::CURLE_SEND_ERROR + } + + /// Returns whether this error corresponds to CURLE_RECV_ERROR. + pub fn is_recv_error(&self) -> bool { + self.code == curl_sys::CURLE_RECV_ERROR + } + + /// Returns whether this error corresponds to CURLE_SSL_CERTPROBLEM. + pub fn is_ssl_certproblem(&self) -> bool { + self.code == curl_sys::CURLE_SSL_CERTPROBLEM + } + + /// Returns whether this error corresponds to CURLE_SSL_CIPHER. 
+ pub fn is_ssl_cipher(&self) -> bool { + self.code == curl_sys::CURLE_SSL_CIPHER + } + + /// Returns whether this error corresponds to CURLE_SSL_CACERT. + pub fn is_ssl_cacert(&self) -> bool { + self.code == curl_sys::CURLE_SSL_CACERT + } + + /// Returns whether this error corresponds to CURLE_BAD_CONTENT_ENCODING. + pub fn is_bad_content_encoding(&self) -> bool { + self.code == curl_sys::CURLE_BAD_CONTENT_ENCODING + } + + /// Returns whether this error corresponds to CURLE_FILESIZE_EXCEEDED. + pub fn is_filesize_exceeded(&self) -> bool { + self.code == curl_sys::CURLE_FILESIZE_EXCEEDED + } + + /// Returns whether this error corresponds to CURLE_USE_SSL_FAILED. + pub fn is_use_ssl_failed(&self) -> bool { + self.code == curl_sys::CURLE_USE_SSL_FAILED + } + + /// Returns whether this error corresponds to CURLE_SEND_FAIL_REWIND. + pub fn is_send_fail_rewind(&self) -> bool { + self.code == curl_sys::CURLE_SEND_FAIL_REWIND + } + + /// Returns whether this error corresponds to CURLE_SSL_ENGINE_INITFAILED. + pub fn is_ssl_engine_initfailed(&self) -> bool { + self.code == curl_sys::CURLE_SSL_ENGINE_INITFAILED + } + + /// Returns whether this error corresponds to CURLE_LOGIN_DENIED. + pub fn is_login_denied(&self) -> bool { + self.code == curl_sys::CURLE_LOGIN_DENIED + } + + /// Returns whether this error corresponds to CURLE_CONV_FAILED. + pub fn is_conv_failed(&self) -> bool { + self.code == curl_sys::CURLE_CONV_FAILED + } + + /// Returns whether this error corresponds to CURLE_CONV_REQD. + pub fn is_conv_required(&self) -> bool { + self.code == curl_sys::CURLE_CONV_REQD + } + + /// Returns whether this error corresponds to CURLE_SSL_CACERT_BADFILE. + pub fn is_ssl_cacert_badfile(&self) -> bool { + self.code == curl_sys::CURLE_SSL_CACERT_BADFILE + } + + /// Returns whether this error corresponds to CURLE_SSL_CRL_BADFILE. 
+ pub fn is_ssl_crl_badfile(&self) -> bool { + self.code == curl_sys::CURLE_SSL_CRL_BADFILE + } + + /// Returns whether this error corresponds to CURLE_SSL_SHUTDOWN_FAILED. + pub fn is_ssl_shutdown_failed(&self) -> bool { + self.code == curl_sys::CURLE_SSL_SHUTDOWN_FAILED + } + + /// Returns whether this error corresponds to CURLE_AGAIN. + pub fn is_again(&self) -> bool { + self.code == curl_sys::CURLE_AGAIN + } + + /// Returns whether this error corresponds to CURLE_SSL_ISSUER_ERROR. + pub fn is_ssl_issuer_error(&self) -> bool { + self.code == curl_sys::CURLE_SSL_ISSUER_ERROR + } + + /// Returns whether this error corresponds to CURLE_CHUNK_FAILED. + pub fn is_chunk_failed(&self) -> bool { + self.code == curl_sys::CURLE_CHUNK_FAILED + } + + // /// Returns whether this error corresponds to CURLE_NO_CONNECTION_AVAILABLE. + // pub fn is_no_connection_available(&self) -> bool { + // self.code == curl_sys::CURLE_NO_CONNECTION_AVAILABLE + // } + + /// Returns the value of the underlying error corresponding to libcurl. + pub fn code(&self) -> curl_sys::CURLcode { + self.code + } + + /// Returns the extra description of this error, if any is available. 
+ pub fn extra_description(&self) -> Option<&str> { + self.extra.as_ref().map(|s| &**s) + } +} + +impl fmt::Display for Error { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let desc = error::Error::description(self); + match self.extra { + Some(ref s) => write!(f, "[{}] {} ({})", self.code(), desc, s), + None => write!(f, "[{}] {}", self.code(), desc), + } + } +} + +impl fmt::Debug for Error { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("Error") + .field("description", &error::Error::description(self)) + .field("code", &self.code) + .field("extra", &self.extra) + .finish() + } +} + +impl error::Error for Error { + fn description(&self) -> &str { + unsafe { + let s = curl_sys::curl_easy_strerror(self.code); + assert!(!s.is_null()); + str::from_utf8(CStr::from_ptr(s).to_bytes()).unwrap() + } + } +} + +/// An error returned from "share" operations. +/// +/// This structure wraps a `CURLSHcode`. +#[derive(Clone, PartialEq)] +pub struct ShareError { + code: curl_sys::CURLSHcode, +} + +impl ShareError { + /// Creates a new error from the underlying code returned by libcurl. + pub fn new(code: curl_sys::CURLSHcode) -> ShareError { + ShareError { code: code } + } + + /// Returns whether this error corresponds to CURLSHE_BAD_OPTION. + pub fn is_bad_option(&self) -> bool { + self.code == curl_sys::CURLSHE_BAD_OPTION + } + + /// Returns whether this error corresponds to CURLSHE_IN_USE. + pub fn is_in_use(&self) -> bool { + self.code == curl_sys::CURLSHE_IN_USE + } + + /// Returns whether this error corresponds to CURLSHE_INVALID. + pub fn is_invalid(&self) -> bool { + self.code == curl_sys::CURLSHE_INVALID + } + + /// Returns whether this error corresponds to CURLSHE_NOMEM. + pub fn is_nomem(&self) -> bool { + self.code == curl_sys::CURLSHE_NOMEM + } + + // /// Returns whether this error corresponds to CURLSHE_NOT_BUILT_IN. 
+ // pub fn is_not_built_in(&self) -> bool { + // self.code == curl_sys::CURLSHE_NOT_BUILT_IN + // } + + /// Returns the value of the underlying error corresponding to libcurl. + pub fn code(&self) -> curl_sys::CURLSHcode { + self.code + } +} + +impl fmt::Display for ShareError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + error::Error::description(self).fmt(f) + } +} + +impl fmt::Debug for ShareError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "ShareError {{ description: {:?}, code: {} }}", + error::Error::description(self), + self.code) + } +} + +impl error::Error for ShareError { + fn description(&self) -> &str { + unsafe { + let s = curl_sys::curl_share_strerror(self.code); + assert!(!s.is_null()); + str::from_utf8(CStr::from_ptr(s).to_bytes()).unwrap() + } + } +} + +/// An error from "multi" operations. +/// +/// THis structure wraps a `CURLMcode`. +#[derive(Clone, PartialEq)] +pub struct MultiError { + code: curl_sys::CURLMcode, +} + +impl MultiError { + /// Creates a new error from the underlying code returned by libcurl. + pub fn new(code: curl_sys::CURLMcode) -> MultiError { + MultiError { code: code } + } + + /// Returns whether this error corresponds to CURLM_BAD_HANDLE. + pub fn is_bad_handle(&self) -> bool { + self.code == curl_sys::CURLM_BAD_HANDLE + } + + /// Returns whether this error corresponds to CURLM_BAD_EASY_HANDLE. + pub fn is_bad_easy_handle(&self) -> bool { + self.code == curl_sys::CURLM_BAD_EASY_HANDLE + } + + /// Returns whether this error corresponds to CURLM_OUT_OF_MEMORY. + pub fn is_out_of_memory(&self) -> bool { + self.code == curl_sys::CURLM_OUT_OF_MEMORY + } + + /// Returns whether this error corresponds to CURLM_INTERNAL_ERROR. + pub fn is_internal_error(&self) -> bool { + self.code == curl_sys::CURLM_INTERNAL_ERROR + } + + /// Returns whether this error corresponds to CURLM_BAD_SOCKET. 
+ pub fn is_bad_socket(&self) -> bool { + self.code == curl_sys::CURLM_BAD_SOCKET + } + + /// Returns whether this error corresponds to CURLM_UNKNOWN_OPTION. + pub fn is_unknown_option(&self) -> bool { + self.code == curl_sys::CURLM_UNKNOWN_OPTION + } + + /// Returns whether this error corresponds to CURLM_CALL_MULTI_PERFORM. + pub fn is_call_perform(&self) -> bool { + self.code == curl_sys::CURLM_CALL_MULTI_PERFORM + } + + // /// Returns whether this error corresponds to CURLM_ADDED_ALREADY. + // pub fn is_added_already(&self) -> bool { + // self.code == curl_sys::CURLM_ADDED_ALREADY + // } + + /// Returns the value of the underlying error corresponding to libcurl. + pub fn code(&self) -> curl_sys::CURLMcode { + self.code + } +} + +impl fmt::Display for MultiError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + error::Error::description(self).fmt(f) + } +} + +impl fmt::Debug for MultiError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "MultiError {{ description: {:?}, code: {} }}", + error::Error::description(self), + self.code) + } +} + +impl error::Error for MultiError { + fn description(&self) -> &str { + unsafe { + let s = curl_sys::curl_multi_strerror(self.code); + assert!(!s.is_null()); + str::from_utf8(CStr::from_ptr(s).to_bytes()).unwrap() + } + } +} + + +/// An error from "form add" operations. +/// +/// THis structure wraps a `CURLFORMcode`. +#[derive(Clone, PartialEq)] +pub struct FormError { + code: curl_sys::CURLFORMcode, +} + +impl FormError { + /// Creates a new error from the underlying code returned by libcurl. + pub fn new(code: curl_sys::CURLFORMcode) -> FormError { + FormError { code: code } + } + + /// Returns whether this error corresponds to CURL_FORMADD_MEMORY. + pub fn is_memory(&self) -> bool { + self.code == curl_sys::CURL_FORMADD_MEMORY + } + + /// Returns whether this error corresponds to CURL_FORMADD_OPTION_TWICE. 
+ pub fn is_option_twice(&self) -> bool { + self.code == curl_sys::CURL_FORMADD_OPTION_TWICE + } + + /// Returns whether this error corresponds to CURL_FORMADD_NULL. + pub fn is_null(&self) -> bool { + self.code == curl_sys::CURL_FORMADD_NULL + } + + /// Returns whether this error corresponds to CURL_FORMADD_UNKNOWN_OPTION. + pub fn is_unknown_option(&self) -> bool { + self.code == curl_sys::CURL_FORMADD_UNKNOWN_OPTION + } + + /// Returns whether this error corresponds to CURL_FORMADD_INCOMPLETE. + pub fn is_incomplete(&self) -> bool { + self.code == curl_sys::CURL_FORMADD_INCOMPLETE + } + + /// Returns whether this error corresponds to CURL_FORMADD_ILLEGAL_ARRAY. + pub fn is_illegal_array(&self) -> bool { + self.code == curl_sys::CURL_FORMADD_ILLEGAL_ARRAY + } + + /// Returns whether this error corresponds to CURL_FORMADD_DISABLED. + pub fn is_disabled(&self) -> bool { + self.code == curl_sys::CURL_FORMADD_DISABLED + } + + /// Returns the value of the underlying error corresponding to libcurl. 
+ pub fn code(&self) -> curl_sys::CURLFORMcode { + self.code + } +} + +impl fmt::Display for FormError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + error::Error::description(self).fmt(f) + } +} + +impl fmt::Debug for FormError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "FormError {{ description: {:?}, code: {} }}", + error::Error::description(self), + self.code) + } +} + +impl error::Error for FormError { + fn description(&self) -> &str { + match self.code { + curl_sys::CURL_FORMADD_MEMORY => "allocation failure", + curl_sys::CURL_FORMADD_OPTION_TWICE => "one option passed twice", + curl_sys::CURL_FORMADD_NULL => "null pointer given for string", + curl_sys::CURL_FORMADD_UNKNOWN_OPTION => "unknown option", + curl_sys::CURL_FORMADD_INCOMPLETE => "form information not complete", + curl_sys::CURL_FORMADD_ILLEGAL_ARRAY => "illegal array in option", + curl_sys::CURL_FORMADD_DISABLED => { + "libcurl does not have support for this option compiled in" + } + _ => "unknown form error", + } + } +} + +impl From for Error { + fn from(_: ffi::NulError) -> Error { + Error { code: curl_sys::CURLE_CONV_FAILED, extra: None } + } +} + +impl From for io::Error { + fn from(e: Error) -> io::Error { + io::Error::new(io::ErrorKind::Other, e) + } +} + +impl From for io::Error { + fn from(e: ShareError) -> io::Error { + io::Error::new(io::ErrorKind::Other, e) + } +} + +impl From for io::Error { + fn from(e: MultiError) -> io::Error { + io::Error::new(io::ErrorKind::Other, e) + } +} + +impl From for io::Error { + fn from(e: FormError) -> io::Error { + io::Error::new(io::ErrorKind::Other, e) + } +} diff -Nru cargo-0.17.0/vendor/curl-0.4.6/src/lib.rs cargo-0.19.0/vendor/curl-0.4.6/src/lib.rs --- cargo-0.17.0/vendor/curl-0.4.6/src/lib.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/curl-0.4.6/src/lib.rs 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,122 @@ +//! Rust bindings to the libcurl C library +//! +//! 
This crate contains bindings for an HTTP/HTTPS client which is powered by +//! [libcurl], the same library behind the `curl` command line tool. The API +//! currently closely matches that of libcurl itself, except that a Rustic layer +//! of safety is applied on top. +//! +//! [libcurl]: https://curl.haxx.se/libcurl/ +//! +//! # The "Easy" API +//! +//! The easiest way to send a request is to use the `Easy` api which corresponds +//! to `CURL` in libcurl. This handle supports a wide variety of options and can +//! be used to make a single blocking request in a thread. Callbacks can be +//! specified to deal with data as it arrives and a handle can be reused to +//! cache connections and such. +//! +//! ```rust,no_run +//! use std::io::{stdout, Write}; +//! +//! use curl::easy::Easy; +//! +//! // Write the contents of rust-lang.org to stdout +//! let mut easy = Easy::new(); +//! easy.url("https://www.rust-lang.org/").unwrap(); +//! easy.write_function(|data| { +//! Ok(stdout().write(data).unwrap()) +//! }).unwrap(); +//! easy.perform().unwrap(); +//! ``` +//! +//! # What about multiple concurrent HTTP requests? +//! +//! One option you have currently is to send multiple requests in multiple +//! threads, but otherwise libcurl has a "multi" interface for doing this +//! operation. Initial bindings of this interface can be found in the `multi` +//! module, but feedback is welcome! +//! +//! # Where does libcurl come from? +//! +//! This crate links to the `curl-sys` crate which is in turn responsible for +//! acquiring and linking to the libcurl library. Currently this crate will +//! build libcurl from source if one is not already detected on the system. +//! +//! There is a large number of releases for libcurl, all with different sets of +//! capabilities. Robust programs may wish to inspect `Version::get()` to test +//! what features are implemented in the linked build of libcurl at runtime. 
+ +#![deny(missing_docs)] +#![doc(html_root_url = "https://docs.rs/curl/0.4")] + +extern crate curl_sys; +extern crate libc; + +#[cfg(all(unix, not(target_os = "macos")))] +extern crate openssl_sys; +#[cfg(all(unix, not(target_os = "macos")))] +extern crate openssl_probe; +#[cfg(windows)] +extern crate winapi; + +use std::ffi::CStr; +use std::str; +use std::sync::{Once, ONCE_INIT}; + +pub use error::{Error, ShareError, MultiError, FormError}; +mod error; + +pub use version::{Version, Protocols}; +mod version; + +mod panic; +pub mod easy; +pub mod multi; + +/// Initializes the underlying libcurl library. +/// +/// It's not required to call this before the library is used, but it's +/// recommended to do so as soon as the program starts. +pub fn init() { + static INIT: Once = ONCE_INIT; + INIT.call_once(|| { + platform_init(); + unsafe { + assert_eq!(curl_sys::curl_global_init(curl_sys::CURL_GLOBAL_ALL), 0); + } + + // Note that we explicitly don't schedule a call to + // `curl_global_cleanup`. The documentation for that function says + // + // > You must not call it when any other thread in the program (i.e. a + // > thread sharing the same memory) is running. This doesn't just mean + // > no other thread that is using libcurl. + // + // We can't ever be sure of that, so unfortunately we can't call the + // function. 
+ }); + + #[cfg(all(unix, not(target_os = "macos")))] + fn platform_init() { + openssl_sys::init(); + } + + #[cfg(not(all(unix, not(target_os = "macos"))))] + fn platform_init() {} +} + +unsafe fn opt_str<'a>(ptr: *const libc::c_char) -> Option<&'a str> { + if ptr.is_null() { + None + } else { + Some(str::from_utf8(CStr::from_ptr(ptr).to_bytes()).unwrap()) + } +} + +fn cvt(r: curl_sys::CURLcode) -> Result<(), Error> { + if r == curl_sys::CURLE_OK { + Ok(()) + } else { + Err(Error::new(r)) + } +} diff -Nru cargo-0.17.0/vendor/curl-0.4.6/src/multi.rs cargo-0.19.0/vendor/curl-0.4.6/src/multi.rs --- cargo-0.17.0/vendor/curl-0.4.6/src/multi.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/curl-0.4.6/src/multi.rs 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,786 @@ +//! Multi - initiating multiple requests simultaneously + +use std::marker; +use std::time::Duration; + +use libc::{c_int, c_char, c_void, c_long, c_short}; +use curl_sys; + +#[cfg(windows)] +use winapi::fd_set; +#[cfg(unix)] +use libc::{fd_set, pollfd, POLLIN, POLLPRI, POLLOUT}; + +use {MultiError, Error}; +use easy::Easy; +use panic; + +/// A multi handle for initiating multiple connections simultaneously. +/// +/// This structure corresponds to `CURLM` in libcurl and provides the ability to +/// have multiple transfers in flight simultaneously. This handle is then used +/// to manage each transfer. The main purpose of a `CURLM` is for the +/// *application* to drive the I/O rather than libcurl itself doing all the +/// blocking. Methods like `action` allow the application to inform libcurl of +/// when events have happened. +/// +/// Lots more documentation can be found on the libcurl [multi tutorial] where +/// the APIs correspond pretty closely with this crate. 
+/// +/// [multi tutorial]: https://curl.haxx.se/libcurl/c/libcurl-multi.html +pub struct Multi { + raw: *mut curl_sys::CURLM, + data: Box, +} + +struct MultiData { + socket: Box, + timer: Box) -> bool + Send>, +} + +/// Message from the `messages` function of a multi handle. +/// +/// Currently only indicates whether a transfer is done. +pub struct Message<'multi> { + ptr: *mut curl_sys::CURLMsg, + _multi: &'multi Multi, +} + +/// Wrapper around an easy handle while it's owned by a multi handle. +/// +/// Once an easy handle has been added to a multi handle then it can no longer +/// be used via `perform`. This handle is also used to remove the easy handle +/// from the multi handle when desired. +pub struct EasyHandle { + easy: Easy, + // This is now effecitvely bound to a `Multi`, so it is no longer sendable. + _marker: marker::PhantomData<&'static Multi>, +} + +/// Notification of the events that have happened on a socket. +/// +/// This type is passed as an argument to the `action` method on a multi handle +/// to indicate what events have occurred on a socket. +pub struct Events { + bits: c_int, +} + +/// Notification of events that are requested on a socket. +/// +/// This type is yielded to the `socket_function` callback to indicate what +/// events are requested on a socket. +#[derive(Debug)] +pub struct SocketEvents { + bits: c_int, +} + +/// Raw underlying socket type that the multi handles use +pub type Socket = curl_sys::curl_socket_t; + +/// File descriptor to wait on for use with the `wait` method on a multi handle. +pub struct WaitFd { + inner: curl_sys::curl_waitfd, +} + +impl Multi { + /// Creates a new multi session through which multiple HTTP transfers can be + /// initiated. 
+ pub fn new() -> Multi { + unsafe { + ::init(); + let ptr = curl_sys::curl_multi_init(); + assert!(!ptr.is_null()); + Multi { + raw: ptr, + data: Box::new(MultiData { + socket: Box::new(|_, _, _| ()), + timer: Box::new(|_| true), + }), + } + } + } + + /// Set the callback informed about what to wait for + /// + /// When the `action` function runs, it informs the application about + /// updates in the socket (file descriptor) status by doing none, one, or + /// multiple calls to the socket callback. The callback gets status updates + /// with changes since the previous time the callback was called. See + /// `action` for more details on how the callback is used and should work. + /// + /// The `SocketEvents` parameter informs the callback on the status of the + /// given socket, and the methods on that type can be used to learn about + /// what's going on with the socket. + /// + /// The third `usize` parameter is a custom value set by the `assign` method + /// below. + pub fn socket_function(&mut self, f: F) -> Result<(), MultiError> + where F: FnMut(Socket, SocketEvents, usize) + Send + 'static, + { + self._socket_function(Box::new(f)) + } + + fn _socket_function(&mut self, + f: Box) + -> Result<(), MultiError> + { + self.data.socket = f; + let cb: curl_sys::curl_socket_callback = cb; + try!(self.setopt_ptr(curl_sys::CURLMOPT_SOCKETFUNCTION, + cb as usize as *const c_char)); + let ptr = &*self.data as *const _; + try!(self.setopt_ptr(curl_sys::CURLMOPT_SOCKETDATA, + ptr as *const c_char)); + return Ok(()); + + // TODO: figure out how to expose `_easy` + extern fn cb(_easy: *mut curl_sys::CURL, + socket: curl_sys::curl_socket_t, + what: c_int, + userptr: *mut c_void, + socketp: *mut c_void) -> c_int { + panic::catch(|| unsafe { + let f = &mut (*(userptr as *mut MultiData)).socket; + f(socket, SocketEvents { bits: what }, socketp as usize) + }); + 0 + } + } + + /// Set data to associate with an internal socket + /// + /// This function creates an association in the 
multi handle between the + /// given socket and a private token of the application. This is designed + /// for `action` uses. + /// + /// When set, the token will be passed to all future socket callbacks for + /// the specified socket. + /// + /// If the given socket isn't already in use by libcurl, this function will + /// return an error. + /// + /// libcurl only keeps one single token associated with a socket, so + /// calling this function several times for the same socket will make the + /// last set token get used. + /// + /// The idea here being that this association (socket to token) is something + /// that just about every application that uses this API will need and then + /// libcurl can just as well do it since it already has an internal hash + /// table lookup for this. + /// + /// # Typical Usage + /// + /// In a typical application you allocate a struct or at least use some kind + /// of semi-dynamic data for each socket that we must wait for action on + /// when using the `action` approach. + /// + /// When our socket-callback gets called by libcurl and we get to know about + /// yet another socket to wait for, we can use `assign` to point out the + /// particular data so that when we get updates about this same socket + /// again, we don't have to find the struct associated with this socket by + /// ourselves. + pub fn assign(&self, + socket: Socket, + token: usize) -> Result<(), MultiError> { + unsafe { + try!(cvt(curl_sys::curl_multi_assign(self.raw, socket, + token as *mut _))); + Ok(()) + } + } + + /// Set callback to receive timeout values + /// + /// Certain features, such as timeouts and retries, require you to call + /// libcurl even when there is no activity on the file descriptors. + /// + /// Your callback function should install a non-repeating timer with the + /// interval specified. Each time that timer fires, call either `action` or + /// `perform`, depending on which interface you use. 
+ /// + /// A timeout value of `None` means you should delete your timer. + /// + /// A timeout value of 0 means you should call `action` or `perform` (once) + /// as soon as possible. + /// + /// This callback will only be called when the timeout changes. + /// + /// The timer callback should return `true` on success, and `false` on + /// error. This callback can be used instead of, or in addition to, + /// `get_timeout`. + pub fn timer_function(&mut self, f: F) -> Result<(), MultiError> + where F: FnMut(Option) -> bool + Send + 'static, + { + self._timer_function(Box::new(f)) + } + + fn _timer_function(&mut self, + f: Box) -> bool + Send>) + -> Result<(), MultiError> + { + self.data.timer = f; + let cb: curl_sys::curl_multi_timer_callback = cb; + try!(self.setopt_ptr(curl_sys::CURLMOPT_TIMERFUNCTION, + cb as usize as *const c_char)); + let ptr = &*self.data as *const _; + try!(self.setopt_ptr(curl_sys::CURLMOPT_TIMERDATA, + ptr as *const c_char)); + return Ok(()); + + // TODO: figure out how to expose `_multi` + extern fn cb(_multi: *mut curl_sys::CURLM, + timeout_ms: c_long, + user: *mut c_void) -> c_int { + let keep_going = panic::catch(|| unsafe { + let f = &mut (*(user as *mut MultiData)).timer; + if timeout_ms == -1 { + f(None) + } else { + f(Some(Duration::from_millis(timeout_ms as u64))) + } + }).unwrap_or(false); + if keep_going {0} else {-1} + } + } + + fn setopt_ptr(&mut self, + opt: curl_sys::CURLMoption, + val: *const c_char) -> Result<(), MultiError> { + unsafe { + cvt(curl_sys::curl_multi_setopt(self.raw, opt, val)) + } + } + + /// Add an easy handle to a multi session + /// + /// Adds a standard easy handle to the multi stack. This function call will + /// make this multi handle control the specified easy handle. + /// + /// When an easy interface is added to a multi handle, it will use a shared + /// connection cache owned by the multi handle. 
Removing and adding new easy + /// handles will not affect the pool of connections or the ability to do + /// connection re-use. + /// + /// If you have `timer_function` set in the multi handle (and you really + /// should if you're working event-based with `action` and friends), that + /// callback will be called from within this function to ask for an updated + /// timer so that your main event loop will get the activity on this handle + /// to get started. + /// + /// The easy handle will remain added to the multi handle until you remove + /// it again with `remove` on the returned handle - even when a transfer + /// with that specific easy handle is completed. + pub fn add(&self, mut easy: Easy) -> Result { + // Clear any configuration set by previous transfers because we're + // moving this into a `Send+'static` situation now basically. + easy.transfer(); + + unsafe { + try!(cvt(curl_sys::curl_multi_add_handle(self.raw, easy.raw()))); + } + Ok(EasyHandle { + easy: easy, + _marker: marker::PhantomData, + }) + } + + /// Remove an easy handle from this multi session + /// + /// Removes the easy handle from this multi handle. This will make the + /// returned easy handle be removed from this multi handle's control. + /// + /// When the easy handle has been removed from a multi stack, it is again + /// perfectly legal to invoke `perform` on it. + /// + /// Removing an easy handle while being used is perfectly legal and will + /// effectively halt the transfer in progress involving that easy handle. + /// All other easy handles and transfers will remain unaffected. + pub fn remove(&self, easy: EasyHandle) -> Result { + unsafe { + try!(cvt(curl_sys::curl_multi_remove_handle(self.raw, + easy.easy.raw()))); + } + Ok(easy.easy) + } + + /// Read multi stack informationals + /// + /// Ask the multi handle if there are any messages/informationals from the + /// individual transfers. 
Messages may include informationals such as an + /// error code from the transfer or just the fact that a transfer is + /// completed. More details on these should be written down as well. + pub fn messages(&self, mut f: F) where F: FnMut(Message) { + self._messages(&mut f) + } + + fn _messages(&self, mut f: &mut FnMut(Message)) { + let mut queue = 0; + unsafe { + loop { + let ptr = curl_sys::curl_multi_info_read(self.raw, &mut queue); + if ptr.is_null() { + break + } + f(Message { ptr: ptr, _multi: self }) + } + } + } + + /// Inform of reads/writes available data given an action + /// + /// When the application has detected action on a socket handled by libcurl, + /// it should call this function with the sockfd argument set to + /// the socket with the action. When the events on a socket are known, they + /// can be passed `events`. When the events on a socket are unknown, pass + /// `Events::new()` instead, and libcurl will test the descriptor + /// internally. + /// + /// The returned integer will contain the number of running easy handles + /// within the multi handle. When this number reaches zero, all transfers + /// are complete/done. When you call `action` on a specific socket and the + /// counter decreases by one, it DOES NOT necessarily mean that this exact + /// socket/transfer is the one that completed. Use `messages` to figure out + /// which easy handle that completed. + /// + /// The `action` function informs the application about updates in the + /// socket (file descriptor) status by doing none, one, or multiple calls to + /// the socket callback function set with the `socket_function` method. They + /// update the status with changes since the previous time the callback was + /// called. 
+ pub fn action(&self, socket: Socket, events: &Events) + -> Result { + let mut remaining = 0; + unsafe { + try!(cvt(curl_sys::curl_multi_socket_action(self.raw, + socket, + events.bits, + &mut remaining))); + Ok(remaining as u32) + } + } + + /// Inform libcurl that a timeout has expired and sockets should be tested. + /// + /// The returned integer will contain the number of running easy handles + /// within the multi handle. When this number reaches zero, all transfers + /// are complete/done. When you call `action` on a specific socket and the + /// counter decreases by one, it DOES NOT necessarily mean that this exact + /// socket/transfer is the one that completed. Use `messages` to figure out + /// which easy handle that completed. + /// + /// Get the timeout time by calling the `timer_function` method. Your + /// application will then get called with information on how long to wait + /// for socket actions at most before doing the timeout action: call the + /// `timeout` method. You can also use the `get_timeout` function to + /// poll the value at any given time, but for an event-based system using + /// the callback is far better than relying on polling the timeout value. + pub fn timeout(&self) -> Result { + let mut remaining = 0; + unsafe { + try!(cvt(curl_sys::curl_multi_socket_action(self.raw, + curl_sys::CURL_SOCKET_BAD, + 0, + &mut remaining))); + Ok(remaining as u32) + } + } + + /// Get how long to wait for action before proceeding + /// + /// An application using the libcurl multi interface should call + /// `get_timeout` to figure out how long it should wait for socket actions - + /// at most - before proceeding. + /// + /// Proceeding means either doing the socket-style timeout action: call the + /// `timeout` function, or call `perform` if you're using the simpler and + /// older multi interface approach. + /// + /// The timeout value returned is the duration at this very moment. 
If 0, it + /// means you should proceed immediately without waiting for anything. If it + /// returns `None`, there's no timeout at all set. + /// + /// Note: if libcurl returns a `None` timeout here, it just means that + /// libcurl currently has no stored timeout value. You must not wait too + /// long (more than a few seconds perhaps) before you call `perform` again. + pub fn get_timeout(&self) -> Result, MultiError> { + let mut ms = 0; + unsafe { + try!(cvt(curl_sys::curl_multi_timeout(self.raw, &mut ms))); + if ms == -1 { + Ok(None) + } else { + Ok(Some(Duration::from_millis(ms as u64))) + } + } + } + + /// Block until activity is detected or a timeout passes. + /// + /// The timeout is used in millisecond-precision. Large durations are + /// clamped at the maximum value curl accepts. + /// + /// The returned integer will contain the number of internal file + /// descriptors on which interesting events occured. + /// + /// This function is a simpler alternative to using `fdset()` and `select()` + /// and does not suffer from file descriptor limits. + /// + /// # Example + /// + /// ``` + /// use curl::multi::Multi; + /// use std::time::Duration; + /// + /// let m = Multi::new(); + /// + /// // Add some Easy handles... + /// + /// while m.perform().unwrap() > 0 { + /// m.wait(&mut [], Duration::from_secs(1)).unwrap(); + /// } + /// ``` + pub fn wait(&self, waitfds: &mut [WaitFd], timeout: Duration) + -> Result { + let timeout_ms = { + let secs = timeout.as_secs(); + if secs > (i32::max_value() / 1000) as u64 { + // Duration too large, clamp at maximum value. + i32::max_value() + } else { + secs as i32 * 1000 + timeout.subsec_nanos() as i32 / 1000_000 + } + }; + unsafe { + let mut ret = 0; + try!(cvt(curl_sys::curl_multi_wait(self.raw, + waitfds.as_mut_ptr() as *mut _, + waitfds.len() as u32, + timeout_ms, + &mut ret))); + Ok(ret as u32) + } + } + + /// Reads/writes available data from each easy handle. 
+ /// + /// This function handles transfers on all the added handles that need + /// attention in an non-blocking fashion. + /// + /// When an application has found out there's data available for this handle + /// or a timeout has elapsed, the application should call this function to + /// read/write whatever there is to read or write right now etc. This + /// method returns as soon as the reads/writes are done. This function does + /// not require that there actually is any data available for reading or + /// that data can be written, it can be called just in case. It will return + /// the number of handles that still transfer data. + /// + /// If the amount of running handles is changed from the previous call (or + /// is less than the amount of easy handles you've added to the multi + /// handle), you know that there is one or more transfers less "running". + /// You can then call `info` to get information about each individual + /// completed transfer, and that returned info includes `Error` and more. + /// If an added handle fails very quickly, it may never be counted as a + /// running handle. + /// + /// When running_handles is set to zero (0) on the return of this function, + /// there is no longer any transfers in progress. + /// + /// # Return + /// + /// Before libcurl version 7.20.0: If you receive `is_call_perform`, this + /// basically means that you should call `perform` again, before you select + /// on more actions. You don't have to do it immediately, but the return + /// code means that libcurl may have more data available to return or that + /// there may be more data to send off before it is "satisfied". Do note + /// that `perform` will return `is_call_perform` only when it wants to be + /// called again immediately. When things are fine and there is nothing + /// immediate it wants done, it'll return `Ok` and you need to wait for + /// "action" and then call this function again. 
+ /// + /// This function only returns errors etc regarding the whole multi stack. + /// Problems still might have occurred on individual transfers even when + /// this function returns `Ok`. Use `info` to figure out how individual + /// transfers did. + pub fn perform(&self) -> Result { + unsafe { + let mut ret = 0; + try!(cvt(curl_sys::curl_multi_perform(self.raw, &mut ret))); + Ok(ret as u32) + } + } + + /// Extracts file descriptor information from a multi handle + /// + /// This function extracts file descriptor information from a given + /// handle, and libcurl returns its `fd_set` sets. The application can use + /// these to `select()` on, but be sure to `FD_ZERO` them before calling + /// this function as curl_multi_fdset only adds its own descriptors, it + /// doesn't zero or otherwise remove any others. The curl_multi_perform + /// function should be called as soon as one of them is ready to be read + /// from or written to. + /// + /// If no file descriptors are set by libcurl, this function will return + /// `Ok(None)`. Otherwise `Ok(Some(n))` will be returned where `n` the + /// highest descriptor number libcurl set. When `Ok(None)` is returned it + /// is because libcurl currently does something that isn't possible for + /// your application to monitor with a socket and unfortunately you can + /// then not know exactly when the current action is completed using + /// `select()`. You then need to wait a while before you proceed and call + /// `perform` anyway. + /// + /// When doing `select()`, you should use `get_timeout` to figure out + /// how long to wait for action. Call `perform` even if no activity has + /// been seen on the `fd_set`s after the timeout expires as otherwise + /// internal retries and timeouts may not work as you'd think and want. 
+ /// + /// If one of the sockets used by libcurl happens to be larger than what + /// can be set in an `fd_set`, which on POSIX systems means that the file + /// descriptor is larger than `FD_SETSIZE`, then libcurl will try to not + /// set it. Setting a too large file descriptor in an `fd_set` implies an out + /// of bounds write which can cause crashes, or worse. The effect of NOT + /// storing it will possibly save you from the crash, but will make your + /// program NOT wait for sockets it should wait for... + pub fn fdset(&self, + read: Option<&mut fd_set>, + write: Option<&mut fd_set>, + except: Option<&mut fd_set>) -> Result, MultiError> { + unsafe { + let mut ret = 0; + let read = read.map(|r| r as *mut _).unwrap_or(0 as *mut _); + let write = write.map(|r| r as *mut _).unwrap_or(0 as *mut _); + let except = except.map(|r| r as *mut _).unwrap_or(0 as *mut _); + try!(cvt(curl_sys::curl_multi_fdset(self.raw, read, write, except, + &mut ret))); + if ret == -1 { + Ok(None) + } else { + Ok(Some(ret)) + } + } + } + + /// Attempt to close the multi handle and clean up all associated resources. + /// + /// Cleans up and removes a whole multi stack. It does not free or touch any + /// individual easy handles in any way - they still need to be closed + /// individually. + pub fn close(&self) -> Result<(), MultiError> { + unsafe { + cvt(curl_sys::curl_multi_cleanup(self.raw)) + } + } +} + +fn cvt(code: curl_sys::CURLMcode) -> Result<(), MultiError> { + if code == curl_sys::CURLM_OK { + Ok(()) + } else { + Err(MultiError::new(code)) + } +} + +impl Drop for Multi { + fn drop(&mut self) { + let _ = self.close(); + } +} + +impl EasyHandle { + /// Sets an internal private token for this `EasyHandle`. + /// + /// This function will set the `CURLOPT_PRIVATE` field on the underlying + /// easy handle. 
+ pub fn set_token(&mut self, token: usize) -> Result<(), Error> { + unsafe { + ::cvt(curl_sys::curl_easy_setopt(self.easy.raw(), + curl_sys::CURLOPT_PRIVATE, + token)) + } + } +} + +impl<'multi> Message<'multi> { + /// If this message indicates that a transfer has finished, returns the + /// result of the transfer in `Some`. + /// + /// If the message doesn't indicate that a transfer has finished, then + /// `None` is returned. + pub fn result(&self) -> Option> { + unsafe { + if (*self.ptr).msg == curl_sys::CURLMSG_DONE { + Some(::cvt((*self.ptr).data as curl_sys::CURLcode)) + } else { + None + } + } + } + + /// Returns whether this easy message was for the specified easy handle or + /// not. + pub fn is_for(&self, handle: &EasyHandle) -> bool { + unsafe { (*self.ptr).easy_handle == handle.easy.raw() } + } + + /// Returns the token associated with the easy handle that this message + /// represents a completion for. + /// + /// This function will return the token assigned with + /// `EasyHandle::set_token`. This reads the `CURLINFO_PRIVATE` field of the + /// underlying `*mut CURL`. + pub fn token(&self) -> Result { + unsafe { + let mut p = 0usize; + try!(::cvt(curl_sys::curl_easy_getinfo((*self.ptr).easy_handle, + curl_sys::CURLINFO_PRIVATE, + &mut p))); + Ok(p) + } + } +} + +impl Events { + /// Creates a new blank event bit mask. + pub fn new() -> Events { + Events { bits: 0 } + } + + /// Set or unset the whether these events indicate that input is ready. + pub fn input(&mut self, val: bool) -> &mut Events { + self.flag(curl_sys::CURL_CSELECT_IN, val) + } + + /// Set or unset the whether these events indicate that output is ready. + pub fn output(&mut self, val: bool) -> &mut Events { + self.flag(curl_sys::CURL_CSELECT_OUT, val) + } + + /// Set or unset the whether these events indicate that an error has + /// happened. 
+ pub fn error(&mut self, val: bool) -> &mut Events { + self.flag(curl_sys::CURL_CSELECT_ERR, val) + } + + fn flag(&mut self, flag: c_int, val: bool) -> &mut Events { + if val { + self.bits |= flag; + } else { + self.bits &= !flag; + } + self + } +} + +impl SocketEvents { + /// Wait for incoming data. For the socket to become readable. + pub fn input(&self) -> bool { + self.bits & curl_sys::CURL_POLL_IN == curl_sys::CURL_POLL_IN + } + + /// Wait for outgoing data. For the socket to become writable. + pub fn output(&self) -> bool { + self.bits & curl_sys::CURL_POLL_OUT == curl_sys::CURL_POLL_OUT + } + + /// Wait for incoming and outgoing data. For the socket to become readable + /// or writable. + pub fn input_and_output(&self) -> bool { + self.bits & curl_sys::CURL_POLL_INOUT == curl_sys::CURL_POLL_INOUT + } + + /// The specified socket/file descriptor is no longer used by libcurl. + pub fn remove(&self) -> bool { + self.bits & curl_sys::CURL_POLL_REMOVE == curl_sys::CURL_POLL_REMOVE + } +} + +impl WaitFd { + /// Constructs an empty (invalid) WaitFd. + pub fn new() -> WaitFd { + WaitFd { + inner: curl_sys::curl_waitfd { + fd: 0, + events: 0, + revents: 0, + } + } + } + + /// Set the file descriptor to wait for. + pub fn set_fd(&mut self, fd: Socket) { + self.inner.fd = fd; + } + + /// Indicate that the socket should poll on read events such as new data + /// received. + /// + /// Corresponds to `CURL_WAIT_POLLIN`. + pub fn poll_on_read(&mut self, val: bool) -> &mut WaitFd { + self.flag(curl_sys::CURL_WAIT_POLLIN, val) + } + + /// Indicate that the socket should poll on high priority read events such + /// as out of band data. + /// + /// Corresponds to `CURL_WAIT_POLLPRI`. + pub fn poll_on_priority_read(&mut self, val: bool) -> &mut WaitFd { + self.flag(curl_sys::CURL_WAIT_POLLPRI, val) + } + + /// Indicate that the socket should poll on write events such as the socket + /// being clear to write without blocking. + /// + /// Corresponds to `CURL_WAIT_POLLOUT`. 
+ pub fn poll_on_write(&mut self, val: bool) -> &mut WaitFd { + self.flag(curl_sys::CURL_WAIT_POLLOUT, val) + } + + fn flag(&mut self, flag: c_short, val: bool) -> &mut WaitFd { + if val { + self.inner.events |= flag; + } else { + self.inner.events &= !flag; + } + self + } + + /// After a call to `wait`, returns `true` if `poll_on_read` was set and a + /// read event occured. + pub fn received_read(&self) -> bool { + self.inner.revents & curl_sys::CURL_WAIT_POLLIN == curl_sys::CURL_WAIT_POLLIN + } + + /// After a call to `wait`, returns `true` if `poll_on_priority_read` was set and a + /// priority read event occured. + pub fn received_priority_read(&self) -> bool { + self.inner.revents & curl_sys::CURL_WAIT_POLLPRI == curl_sys::CURL_WAIT_POLLPRI + } + + /// After a call to `wait`, returns `true` if `poll_on_write` was set and a + /// write event occured. + pub fn received_write(&self) -> bool { + self.inner.revents & curl_sys::CURL_WAIT_POLLOUT == curl_sys::CURL_WAIT_POLLOUT + } +} + +#[cfg(unix)] +impl From for WaitFd { + fn from(pfd: pollfd) -> WaitFd { + let mut events = 0; + if pfd.events & POLLIN == POLLIN { + events |= curl_sys::CURL_WAIT_POLLIN; + } + if pfd.events & POLLPRI == POLLPRI { + events |= curl_sys::CURL_WAIT_POLLPRI; + } + if pfd.events & POLLOUT == POLLOUT { + events |= curl_sys::CURL_WAIT_POLLOUT; + } + WaitFd { + inner: curl_sys::curl_waitfd { + fd: pfd.fd, + events: events, + revents: 0, + } + } + } +} diff -Nru cargo-0.17.0/vendor/curl-0.4.6/src/panic.rs cargo-0.19.0/vendor/curl-0.4.6/src/panic.rs --- cargo-0.17.0/vendor/curl-0.4.6/src/panic.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/curl-0.4.6/src/panic.rs 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,30 @@ +use std::any::Any; +use std::cell::RefCell; +use std::panic::{self, AssertUnwindSafe}; + +thread_local!(static LAST_ERROR: RefCell>> = { + RefCell::new(None) +}); + +pub fn catch T>(f: F) -> Option { + if LAST_ERROR.with(|slot| slot.borrow().is_some()) { + return 
None + } + + // Note that `AssertUnwindSafe` is used here as we prevent reentering + // arbitrary code due to the `LAST_ERROR` check above plus propagation of a + // panic after we return back to user code from C. + match panic::catch_unwind(AssertUnwindSafe(f)) { + Ok(ret) => Some(ret), + Err(e) => { + LAST_ERROR.with(|slot| *slot.borrow_mut() = Some(e)); + None + } + } +} + +pub fn propagate() { + if let Some(t) = LAST_ERROR.with(|slot| slot.borrow_mut().take()) { + panic::resume_unwind(t) + } +} diff -Nru cargo-0.17.0/vendor/curl-0.4.6/src/version.rs cargo-0.19.0/vendor/curl-0.4.6/src/version.rs --- cargo-0.17.0/vendor/curl-0.4.6/src/version.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/curl-0.4.6/src/version.rs 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,243 @@ +use std::ffi::CStr; +use std::str; + +use curl_sys; +use libc::{c_int, c_char}; + +/// Version information about libcurl and the capabilities that it supports. +pub struct Version { + inner: *mut curl_sys::curl_version_info_data, +} + +unsafe impl Send for Version {} +unsafe impl Sync for Version {} + +/// An iterator over the list of protocols a version supports. +pub struct Protocols<'a> { + cur: *const *const c_char, + _inner: &'a Version, +} + +impl Version { + /// Returns the libcurl version that this library is currently linked against. + pub fn num() -> &'static str { + unsafe { + let s = CStr::from_ptr(curl_sys::curl_version() as *const _); + str::from_utf8(s.to_bytes()).unwrap() + } + } + + /// Returns the libcurl version that this library is currently linked against. 
+ pub fn get() -> Version { + unsafe { + let ptr = curl_sys::curl_version_info(curl_sys::CURLVERSION_FOURTH); + assert!(!ptr.is_null()); + Version { inner: ptr } + } + } + + /// Returns the human readable version string, + pub fn version(&self) -> &str { + unsafe { + ::opt_str((*self.inner).version).unwrap() + } + } + + /// Returns a numeric representation of the version number + /// + /// This is a 24 bit number made up of the major number, minor, and then + /// patch number. For example 7.9.8 willr eturn 0x070908. + pub fn version_num(&self) -> u32 { + unsafe { + (*self.inner).version_num as u32 + } + } + + /// Returns a human readable string of the host libcurl is built for. + /// + /// This is discovered as part of the build environment. + pub fn host(&self) -> &str { + unsafe { + ::opt_str((*self.inner).host).unwrap() + } + } + + /// Returns whether libcurl supports IPv6 + pub fn feature_ipv6(&self) -> bool { + self.flag(curl_sys::CURL_VERSION_IPV6) + } + + /// Returns whether libcurl supports SSL + pub fn feature_ssl(&self) -> bool { + self.flag(curl_sys::CURL_VERSION_SSL) + } + + /// Returns whether libcurl supports HTTP deflate via libz + pub fn feature_libz(&self) -> bool { + self.flag(curl_sys::CURL_VERSION_LIBZ) + } + + /// Returns whether libcurl supports HTTP NTLM + pub fn feature_ntlm(&self) -> bool { + self.flag(curl_sys::CURL_VERSION_NTLM) + } + + /// Returns whether libcurl supports HTTP GSSNEGOTIATE + pub fn feature_gss_negotiate(&self) -> bool { + self.flag(curl_sys::CURL_VERSION_GSSNEGOTIATE) + } + + /// Returns whether libcurl was built with debug capabilities + pub fn feature_debug(&self) -> bool { + self.flag(curl_sys::CURL_VERSION_DEBUG) + } + + /// Returns whether libcurl was built with SPNEGO authentication + pub fn feature_spnego(&self) -> bool { + self.flag(curl_sys::CURL_VERSION_SPNEGO) + } + + /// Returns whether libcurl was built with large file support + pub fn feature_largefile(&self) -> bool { + 
self.flag(curl_sys::CURL_VERSION_LARGEFILE) + } + + /// Returns whether libcurl was built with support for IDNA, domain names + /// with international letters. + pub fn feature_idn(&self) -> bool { + self.flag(curl_sys::CURL_VERSION_IDN) + } + + /// Returns whether libcurl was built with support for SSPI. + pub fn feature_sspi(&self) -> bool { + self.flag(curl_sys::CURL_VERSION_SSPI) + } + + /// Returns whether libcurl was built with asynchronous name lookups. + pub fn feature_async_dns(&self) -> bool { + self.flag(curl_sys::CURL_VERSION_ASYNCHDNS) + } + + /// Returns whether libcurl was built with support for character + /// conversions. + pub fn feature_conv(&self) -> bool { + self.flag(curl_sys::CURL_VERSION_CONV) + } + + /// Returns whether libcurl was built with support for TLS-SRP. + pub fn feature_tlsauth_srp(&self) -> bool { + self.flag(curl_sys::CURL_VERSION_TLSAUTH_SRP) + } + + /// Returns whether libcurl was built with support for NTLM delegation to + /// winbind helper. + pub fn feature_ntlm_wb(&self) -> bool { + self.flag(curl_sys::CURL_VERSION_NTLM_WB) + } + + // /// Returns whether libcurl was built with support for HTTP2. + // pub fn feature_http2(&self) -> bool { + // self.flag(curl_sys::CURL_VERSION_HTTP2) + // } + + fn flag(&self, flag: c_int) -> bool { + unsafe { + (*self.inner).features & flag != 0 + } + } + + /// Returns the version of OpenSSL that is used, or None if there is no SSL + /// support. + pub fn ssl_version(&self) -> Option<&str> { + unsafe { + ::opt_str((*self.inner).ssl_version) + } + } + + /// Returns the version of libz that is used, or None if there is no libz + /// support. + pub fn libz_version(&self) -> Option<&str> { + unsafe { + ::opt_str((*self.inner).libz_version) + } + } + + /// Returns an iterator over the list of protocols that this build of + /// libcurl supports. 
+ pub fn protocols(&self) -> Protocols { + unsafe { + Protocols { _inner: self, cur: (*self.inner).protocols } + } + } + + /// If available, the human readable version of ares that libcurl is linked + /// against. + pub fn ares_version(&self) -> Option<&str> { + unsafe { + if (*self.inner).age >= 1 { + ::opt_str((*self.inner).ares) + } else { + None + } + } + } + + /// If available, the version of ares that libcurl is linked against. + pub fn ares_version_num(&self) -> Option { + unsafe { + if (*self.inner).age >= 1 { + Some((*self.inner).ares_num as u32) + } else { + None + } + } + } + + /// If available, the version of libidn that libcurl is linked against. + pub fn libidn_version(&self) -> Option<&str> { + unsafe { + if (*self.inner).age >= 2 { + ::opt_str((*self.inner).libidn) + } else { + None + } + } + } + + /// If available, the version of iconv libcurl is linked against. + pub fn iconv_version_num(&self) -> Option { + unsafe { + if (*self.inner).age >= 3 { + Some((*self.inner).iconv_ver_num as u32) + } else { + None + } + } + } + + /// If available, the version of iconv libcurl is linked against. + pub fn libssh_version(&self) -> Option<&str> { + unsafe { + if (*self.inner).age >= 3 { + ::opt_str((*self.inner).libssh_version) + } else { + None + } + } + } +} + +impl<'a> Iterator for Protocols<'a> { + type Item = &'a str; + + fn next(&mut self) -> Option<&'a str> { + unsafe { + if (*self.cur).is_null() { + return None + } + let ret = ::opt_str(*self.cur).unwrap(); + self.cur = self.cur.offset(1); + Some(ret) + } + } +} diff -Nru cargo-0.17.0/vendor/curl-0.4.6/tests/easy.rs cargo-0.19.0/vendor/curl-0.4.6/tests/easy.rs --- cargo-0.17.0/vendor/curl-0.4.6/tests/easy.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/curl-0.4.6/tests/easy.rs 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,666 @@ +extern crate curl; + +use std::cell::{RefCell, Cell}; +use std::io::Read; +use std::rc::Rc; +use std::str; +use std::time::Duration; + +macro_rules! 
t { + ($e:expr) => (match $e { + Ok(e) => e, + Err(e) => panic!("{} failed with {:?}", stringify!($e), e), + }) +} + +use curl::easy::{Easy, List, WriteError, ReadError, Transfer}; + +use server::Server; +mod server; + +fn handle() -> Easy { + let mut e = Easy::new(); + t!(e.timeout(Duration::new(20, 0))); + return e +} + +fn sink(data: &[u8]) -> Result { + Ok(data.len()) +} + +#[test] +fn get_smoke() { + let s = Server::new(); + s.receive("\ +GET / HTTP/1.1\r\n\ +Host: 127.0.0.1:$PORT\r\n\ +Accept: */*\r\n\ +\r\n"); + s.send("HTTP/1.1 200 OK\r\n\r\n"); + + let mut handle = handle(); + t!(handle.url(&s.url("/"))); + t!(handle.perform()); +} + +#[test] +fn get_path() { + let s = Server::new(); + s.receive("\ +GET /foo HTTP/1.1\r\n\ +Host: 127.0.0.1:$PORT\r\n\ +Accept: */*\r\n\ +\r\n"); + s.send("HTTP/1.1 200 OK\r\n\r\n"); + + let mut handle = handle(); + t!(handle.url(&s.url("/foo"))); + t!(handle.perform()); +} + +#[test] +fn write_callback() { + let s = Server::new(); + s.receive("\ +GET / HTTP/1.1\r\n\ +Host: 127.0.0.1:$PORT\r\n\ +Accept: */*\r\n\ +\r\n"); + s.send("HTTP/1.1 200 OK\r\n\r\nhello!"); + + let mut all = Vec::::new(); + { + let mut handle = handle(); + t!(handle.url(&s.url("/"))); + let mut handle = handle.transfer(); + t!(handle.write_function(|data| { + all.extend(data); + Ok(data.len()) + })); + t!(handle.perform()); + } + assert_eq!(all, b"hello!"); +} + +#[test] +fn progress() { + let s = Server::new(); + s.receive("\ +GET /foo HTTP/1.1\r\n\ +Host: 127.0.0.1:$PORT\r\n\ +Accept: */*\r\n\ +\r\n"); + s.send("HTTP/1.1 200 OK\r\n\r\nHello!"); + + let mut hits = 0; + let mut dl = 0.0; + { + let mut handle = handle(); + t!(handle.url(&s.url("/foo"))); + t!(handle.progress(true)); + t!(handle.write_function(sink)); + + let mut handle = handle.transfer(); + t!(handle.progress_function(|_, a, _, _| { + hits += 1; + dl = a; + true + })); + t!(handle.perform()); + } + assert!(hits > 0); + assert_eq!(dl, 6.0); +} + +#[test] +fn headers() { + let s = 
Server::new(); + s.receive("\ +GET / HTTP/1.1\r\n\ +Host: 127.0.0.1:$PORT\r\n\ +Accept: */*\r\n\ +\r\n"); + s.send("\ +HTTP/1.1 200 OK\r\n\ +Foo: bar\r\n\ +Bar: baz\r\n\ +\r\n +Hello!"); + + let mut headers = Vec::new(); + { + let mut handle = handle(); + t!(handle.url(&s.url("/"))); + + let mut handle = handle.transfer(); + t!(handle.header_function(|h| { + headers.push(str::from_utf8(h).unwrap().to_string()); + true + })); + t!(handle.write_function(sink)); + t!(handle.perform()); + } + assert_eq!(headers, vec![ + "HTTP/1.1 200 OK\r\n".to_string(), + "Foo: bar\r\n".to_string(), + "Bar: baz\r\n".to_string(), + "\r\n".to_string(), + ]); +} + +#[test] +fn fail_on_error() { + let s = Server::new(); + s.receive("\ +GET / HTTP/1.1\r\n\ +Host: 127.0.0.1:$PORT\r\n\ +Accept: */*\r\n\ +\r\n"); + s.send("\ +HTTP/1.1 401 Not so good\r\n\ +\r\n"); + + let mut h = handle(); + t!(h.url(&s.url("/"))); + t!(h.fail_on_error(true)); + assert!(h.perform().is_err()); + + let s = Server::new(); + s.receive("\ +GET / HTTP/1.1\r\n\ +Host: 127.0.0.1:$PORT\r\n\ +Accept: */*\r\n\ +\r\n"); + s.send("\ +HTTP/1.1 401 Not so good\r\n\ +\r\n"); + + let mut h = handle(); + t!(h.url(&s.url("/"))); + t!(h.fail_on_error(false)); + t!(h.perform()); +} + +#[test] +fn port() { + let s = Server::new(); + s.receive("\ +GET / HTTP/1.1\r\n\ +Host: localhost:$PORT\r\n\ +Accept: */*\r\n\ +\r\n"); + s.send("\ +HTTP/1.1 200 OK\r\n\ +\r\n"); + + let mut h = handle(); + t!(h.url("http://localhost/")); + t!(h.port(s.addr().port())); + t!(h.perform()); +} + +#[test] +fn proxy() { + let s = Server::new(); + s.receive("\ +GET http://example.com/ HTTP/1.1\r\n\ +Host: example.com\r\n\ +Accept: */*\r\n\ +\r\n"); + s.send("\ +HTTP/1.1 200 OK\r\n\ +\r\n"); + + let mut h = handle(); + t!(h.url("http://example.com/")); + t!(h.proxy(&s.url("/"))); + t!(h.perform()); +} + +#[test] +fn noproxy() { + let s = Server::new(); + s.receive("\ +GET / HTTP/1.1\r\n\ +Host: 127.0.0.1:$PORT\r\n\ +Accept: */*\r\n\ +\r\n"); + s.send("\ 
+HTTP/1.1 200 OK\r\n\ +\r\n"); + + let mut h = handle(); + t!(h.url(&s.url("/"))); + t!(h.proxy(&s.url("/"))); + t!(h.noproxy("127.0.0.1")); + t!(h.perform()); +} + +#[test] +fn misc() { + let mut h = handle(); + t!(h.tcp_nodelay(true)); + // t!(h.tcp_keepalive(true)); + // t!(h.tcp_keepidle(Duration::new(3, 0))); + // t!(h.tcp_keepintvl(Duration::new(3, 0))); + t!(h.buffer_size(10)); + t!(h.dns_cache_timeout(Duration::new(1, 0))); +} + +#[test] +fn userpass() { + let s = Server::new(); + s.receive("\ +GET / HTTP/1.1\r\n\ +Authorization: Basic YmFyOg==\r\n\ +Host: 127.0.0.1:$PORT\r\n\ +Accept: */*\r\n\ +\r\n"); + s.send("\ +HTTP/1.1 200 OK\r\n\ +\r\n"); + + let mut h = handle(); + t!(h.url(&s.url("/"))); + t!(h.username("foo")); + t!(h.username("bar")); + t!(h.perform()); +} + +#[test] +fn accept_encoding() { + let s = Server::new(); + s.receive("\ +GET / HTTP/1.1\r\n\ +Host: 127.0.0.1:$PORT\r\n\ +Accept: */*\r\n\ +Accept-Encoding: gzip\r\n\ +\r\n"); + s.send("\ +HTTP/1.1 200 OK\r\n\ +\r\n"); + + let mut h = handle(); + t!(h.url(&s.url("/"))); + t!(h.accept_encoding("gzip")); + t!(h.perform()); +} + +#[test] +fn follow_location() { + let s1 = Server::new(); + let s2 = Server::new(); + s1.receive("\ +GET / HTTP/1.1\r\n\ +Host: 127.0.0.1:$PORT\r\n\ +Accept: */*\r\n\ +\r\n"); + s1.send(&format!("\ +HTTP/1.1 301 Moved Permanently\r\n\ +Location: http://{}/foo\r\n\ +\r\n", s2.addr())); + + s2.receive("\ +GET /foo HTTP/1.1\r\n\ +Host: 127.0.0.1:$PORT\r\n\ +Accept: */*\r\n\ +\r\n"); + s2.send("\ +HTTP/1.1 200 OK\r\n\ +\r\n"); + + let mut h = handle(); + t!(h.url(&s1.url("/"))); + t!(h.follow_location(true)); + t!(h.perform()); +} + +#[test] +fn put() { + let s = Server::new(); + s.receive("\ +PUT / HTTP/1.1\r\n\ +Host: 127.0.0.1:$PORT\r\n\ +Accept: */*\r\n\ +Content-Length: 5\r\n\ +\r\n\ +data\n"); + s.send("\ +HTTP/1.1 200 OK\r\n\ +\r\n"); + + let mut data = "data\n".as_bytes(); + let mut list = List::new(); + t!(list.append("Expect:")); + let mut h = handle(); + 
t!(h.url(&s.url("/"))); + t!(h.put(true)); + t!(h.in_filesize(5)); + t!(h.upload(true)); + t!(h.http_headers(list)); + let mut h = h.transfer(); + t!(h.read_function(|buf| { + Ok(data.read(buf).unwrap()) + })); + t!(h.perform()); +} + +#[test] +fn post1() { + let s = Server::new(); + s.receive("\ +POST / HTTP/1.1\r\n\ +Host: 127.0.0.1:$PORT\r\n\ +Accept: */*\r\n\ +Content-Length: 5\r\n\ +Content-Type: application/x-www-form-urlencoded\r\n\ +\r\n\ +data\n"); + s.send("\ +HTTP/1.1 200 OK\r\n\ +\r\n"); + + let mut h = handle(); + t!(h.url(&s.url("/"))); + t!(h.post(true)); + t!(h.post_fields_copy(b"data\n")); + t!(h.perform()); +} + +#[test] +fn post2() { + let s = Server::new(); + s.receive("\ +POST / HTTP/1.1\r\n\ +Host: 127.0.0.1:$PORT\r\n\ +Accept: */*\r\n\ +Content-Length: 5\r\n\ +Content-Type: application/x-www-form-urlencoded\r\n\ +\r\n\ +data\n"); + s.send("\ +HTTP/1.1 200 OK\r\n\ +\r\n"); + + let mut h = handle(); + t!(h.url(&s.url("/"))); + t!(h.post(true)); + t!(h.post_fields_copy(b"data\n")); + t!(h.write_function(sink)); + t!(h.perform()); +} + +#[test] +fn post3() { + let s = Server::new(); + s.receive("\ +POST / HTTP/1.1\r\n\ +Host: 127.0.0.1:$PORT\r\n\ +Accept: */*\r\n\ +Content-Length: 5\r\n\ +Content-Type: application/x-www-form-urlencoded\r\n\ +\r\n\ +data\n"); + s.send("\ +HTTP/1.1 200 OK\r\n\ +\r\n"); + + let mut data = "data\n".as_bytes(); + let mut h = handle(); + t!(h.url(&s.url("/"))); + t!(h.post(true)); + t!(h.post_field_size(5)); + let mut h = h.transfer(); + t!(h.read_function(|buf| { + Ok(data.read(buf).unwrap()) + })); + t!(h.perform()); +} + +#[test] +fn referer() { + let s = Server::new(); + s.receive("\ +GET / HTTP/1.1\r\n\ +Host: 127.0.0.1:$PORT\r\n\ +Accept: */*\r\n\ +Referer: foo\r\n\ +\r\n"); + s.send("\ +HTTP/1.1 200 OK\r\n\ +\r\n"); + + let mut h = handle(); + t!(h.url(&s.url("/"))); + t!(h.referer("foo")); + t!(h.perform()); +} + +#[test] +fn useragent() { + let s = Server::new(); + s.receive("\ +GET / HTTP/1.1\r\n\ 
+User-Agent: foo\r\n\ +Host: 127.0.0.1:$PORT\r\n\ +Accept: */*\r\n\ +\r\n"); + s.send("\ +HTTP/1.1 200 OK\r\n\ +\r\n"); + + let mut h = handle(); + t!(h.url(&s.url("/"))); + t!(h.useragent("foo")); + t!(h.perform()); +} + +#[test] +fn custom_headers() { + let s = Server::new(); + s.receive("\ +GET / HTTP/1.1\r\n\ +Host: 127.0.0.1:$PORT\r\n\ +Foo: bar\r\n\ +\r\n"); + s.send("\ +HTTP/1.1 200 OK\r\n\ +\r\n"); + + let mut custom = List::new(); + t!(custom.append("Foo: bar")); + t!(custom.append("Accept:")); + let mut h = handle(); + t!(h.url(&s.url("/"))); + t!(h.http_headers(custom)); + t!(h.perform()); +} + +#[test] +fn cookie() { + let s = Server::new(); + s.receive("\ +GET / HTTP/1.1\r\n\ +Host: 127.0.0.1:$PORT\r\n\ +Accept: */*\r\n\ +Cookie: foo\r\n\ +\r\n"); + s.send("\ +HTTP/1.1 200 OK\r\n\ +\r\n"); + + let mut h = handle(); + t!(h.url(&s.url("/"))); + t!(h.cookie("foo")); + t!(h.perform()); +} + +#[test] +fn url_encoding() { + let mut h = handle(); + assert_eq!(h.url_encode(b"foo"), "foo"); + assert_eq!(h.url_encode(b"foo bar"), "foo%20bar"); + assert_eq!(h.url_encode(b"foo bar\xff"), "foo%20bar%FF"); + assert_eq!(h.url_encode(b""), ""); + assert_eq!(h.url_decode("foo"), b"foo"); + assert_eq!(h.url_decode("foo%20bar"), b"foo bar"); + assert_eq!(h.url_decode("foo%2"), b"foo%2"); + assert_eq!(h.url_decode("foo%xx"), b"foo%xx"); + assert_eq!(h.url_decode("foo%ff"), b"foo\xff"); + assert_eq!(h.url_decode(""), b""); +} + +#[test] +fn getters() { + let s = Server::new(); + s.receive("\ +GET / HTTP/1.1\r\n\ +Host: 127.0.0.1:$PORT\r\n\ +Accept: */*\r\n\ +\r\n"); + s.send("\ +HTTP/1.1 200 OK\r\n\ +\r\n"); + + let mut h = handle(); + t!(h.url(&s.url("/"))); + t!(h.cookie_file("/dev/null")); + t!(h.perform()); + assert_eq!(t!(h.response_code()), 200); + assert_eq!(t!(h.redirect_count()), 0); + assert_eq!(t!(h.redirect_url()), None); + assert_eq!(t!(h.content_type()), None); + + let addr = format!("http://{}/", s.addr()); + assert_eq!(t!(h.effective_url()), 
Some(&addr[..])); + + // TODO: test this + // let cookies = t!(h.cookies()).iter() + // .map(|s| s.to_vec()) + // .collect::>(); + // assert_eq!(cookies.len(), 1); +} + +#[test] +#[should_panic] +fn panic_in_callback() { + let s = Server::new(); + s.receive("\ +GET / HTTP/1.1\r\n\ +Host: 127.0.0.1:$PORT\r\n\ +Accept: */*\r\n\ +\r\n"); + s.send("\ +HTTP/1.1 200 OK\r\n\ +\r\n"); + + let mut h = handle(); + t!(h.url(&s.url("/"))); + t!(h.header_function(|_| panic!())); + t!(h.perform()); +} + +#[test] +fn abort_read() { + let s = Server::new(); + s.receive("\ +PUT / HTTP/1.1\r\n\ +Host: 127.0.0.1:$PORT\r\n\ +Accept: */*\r\n\ +Content-Length: 2\r\n\ +\r\n"); + s.send("\ +HTTP/1.1 200 OK\r\n\ +\r\n"); + + let mut h = handle(); + t!(h.url(&s.url("/"))); + t!(h.read_function(|_| Err(ReadError::Abort))); + t!(h.put(true)); + t!(h.in_filesize(2)); + let mut list = List::new(); + t!(list.append("Expect:")); + t!(h.http_headers(list)); + let err = h.perform().unwrap_err(); + assert!(err.is_aborted_by_callback()); +} + +#[test] +fn pause_write_then_resume() { + let s = Server::new(); + s.receive("\ +GET / HTTP/1.1\r\n\ +Host: 127.0.0.1:$PORT\r\n\ +Accept: */*\r\n\ +\r\n"); + s.send("\ +HTTP/1.1 200 OK\r\n\ +\r\n +a\n +b"); + + let mut h = handle(); + t!(h.url(&s.url("/"))); + t!(h.progress(true)); + + struct State<'a, 'b> { + paused: Cell, + unpaused: Cell, + transfer: RefCell>, + } + + let h = Rc::new(State { + paused: Cell::new(false), + unpaused: Cell::new(false), + transfer: RefCell::new(h.transfer()), + }); + + let h2 = h.clone(); + t!(h.transfer.borrow_mut().write_function(move |data| { + if h2.unpaused.get() { + h2.unpaused.set(false); + Ok(data.len()) + } else { + h2.paused.set(true); + Err(WriteError::Pause) + } + })); + let h2 = h.clone(); + t!(h.transfer.borrow_mut().progress_function(move |_, _, _, _| { + if h2.paused.get() { + h2.paused.set(false); + h2.unpaused.set(true); + t!(h2.transfer.borrow().unpause_write()); + } + true + })); + 
t!(h.transfer.borrow().perform()); +} + +#[test] +fn perform_in_perform_is_bad() { + let s = Server::new(); + s.receive("\ +GET / HTTP/1.1\r\n\ +Host: 127.0.0.1:$PORT\r\n\ +Accept: */*\r\n\ +\r\n"); + s.send("\ +HTTP/1.1 200 OK\r\n\ +\r\n +a\n +b"); + + let mut h = handle(); + t!(h.url(&s.url("/"))); + t!(h.progress(true)); + + let h = Rc::new(RefCell::new(h.transfer())); + + let h2 = h.clone(); + t!(h.borrow_mut().write_function(move |data| { + assert!(h2.borrow().perform().is_err()); + Ok(data.len()) + })); + t!(h.borrow().perform()); +} diff -Nru cargo-0.17.0/vendor/curl-0.4.6/tests/formdata cargo-0.19.0/vendor/curl-0.4.6/tests/formdata --- cargo-0.17.0/vendor/curl-0.4.6/tests/formdata 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/curl-0.4.6/tests/formdata 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1 @@ +hello diff -Nru cargo-0.17.0/vendor/curl-0.4.6/tests/multi.rs cargo-0.19.0/vendor/curl-0.4.6/tests/multi.rs --- cargo-0.17.0/vendor/curl-0.4.6/tests/multi.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/curl-0.4.6/tests/multi.rs 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,252 @@ +#![cfg(unix)] + +extern crate curl; +extern crate mio; + +use std::collections::HashMap; +use std::io::{Read, Cursor}; +use std::time::Duration; + +use curl::easy::{Easy, List}; +use curl::multi::Multi; + +macro_rules! 
t { + ($e:expr) => (match $e { + Ok(e) => e, + Err(e) => panic!("{} failed with {:?}", stringify!($e), e), + }) +} + +use server::Server; +mod server; + +#[test] +fn smoke() { + let m = Multi::new(); + let mut e = Easy::new(); + + let s = Server::new(); + s.receive("\ +GET / HTTP/1.1\r\n\ +Host: 127.0.0.1:$PORT\r\n\ +Accept: */*\r\n\ +\r\n"); + s.send("HTTP/1.1 200 OK\r\n\r\n"); + + t!(e.url(&s.url("/"))); + let _e = t!(m.add(e)); + while t!(m.perform()) > 0 { + t!(m.wait(&mut [], Duration::from_secs(1))); + } +} + +#[test] +fn smoke2() { + let m = Multi::new(); + + let s1 = Server::new(); + s1.receive("\ +GET / HTTP/1.1\r\n\ +Host: 127.0.0.1:$PORT\r\n\ +Accept: */*\r\n\ +\r\n"); + s1.send("HTTP/1.1 200 OK\r\n\r\n"); + + let s2 = Server::new(); + s2.receive("\ +GET / HTTP/1.1\r\n\ +Host: 127.0.0.1:$PORT\r\n\ +Accept: */*\r\n\ +\r\n"); + s2.send("HTTP/1.1 200 OK\r\n\r\n"); + + let mut e1 = Easy::new(); + t!(e1.url(&s1.url("/"))); + let _e1 = t!(m.add(e1)); + let mut e2 = Easy::new(); + t!(e2.url(&s2.url("/"))); + let _e2 = t!(m.add(e2)); + + while t!(m.perform()) > 0 { + t!(m.wait(&mut [], Duration::from_secs(1))); + } + + let mut done = 0; + m.messages(|msg| { + msg.result().unwrap().unwrap(); + done += 1; + }); + assert_eq!(done, 2); +} + +#[test] +fn upload_lots() { + use curl::multi::{Socket, SocketEvents, Events}; + + #[derive(Debug)] + enum Message { + Timeout(Option), + Wait(Socket, SocketEvents, usize), + } + + let mut m = Multi::new(); + let poll = t!(mio::Poll::new()); + let (tx, rx) = mio::channel::channel(); + let tx2 = tx.clone(); + t!(m.socket_function(move |socket, events, token| { + t!(tx2.send(Message::Wait(socket, events, token))); + })); + t!(m.timer_function(move |dur| { + t!(tx.send(Message::Timeout(dur))); + true + })); + + let s = Server::new(); + s.receive(&format!("\ +PUT / HTTP/1.1\r\n\ +Host: 127.0.0.1:$PORT\r\n\ +Accept: */*\r\n\ +Content-Length: 131072\r\n\ +\r\n\ +{}\n", vec!["a"; 128 * 1024 - 1].join(""))); + s.send("\ +HTTP/1.1 200 
OK\r\n\ +\r\n"); + + let mut data = vec![b'a'; 128 * 1024 - 1]; + data.push(b'\n'); + let mut data = Cursor::new(data); + let mut list = List::new(); + t!(list.append("Expect:")); + let mut h = Easy::new(); + t!(h.url(&s.url("/"))); + t!(h.put(true)); + t!(h.read_function(move |buf| { + Ok(data.read(buf).unwrap()) + })); + t!(h.in_filesize(128 * 1024)); + t!(h.upload(true)); + t!(h.http_headers(list)); + + t!(poll.register(&rx, + mio::Token(0), + mio::Ready::all(), + mio::PollOpt::level())); + + let e = t!(m.add(h)); + + assert!(t!(m.perform()) > 0); + let mut next_token = 1; + let mut token_map = HashMap::new(); + let mut cur_timeout = None; + let mut events = mio::Events::with_capacity(128); + let mut running = true; + + while running { + let n = t!(poll.poll(&mut events, cur_timeout)); + + if n == 0 { + if t!(m.timeout()) == 0 { + running = false; + } + } + + for event in events.iter() { + while event.token() == mio::Token(0) { + match rx.try_recv() { + Ok(Message::Timeout(dur)) => cur_timeout = dur, + Ok(Message::Wait(socket, events, token)) => { + let evented = mio::unix::EventedFd(&socket); + if events.remove() { + token_map.remove(&token).unwrap(); + } else { + let mut e = mio::Ready::none(); + if events.input() { + e = e | mio::Ready::readable(); + } + if events.output() { + e = e | mio::Ready::writable(); + } + if token == 0 { + let token = next_token; + next_token += 1; + t!(m.assign(socket, token)); + token_map.insert(token, socket); + t!(poll.register(&evented, + mio::Token(token), + e, + mio::PollOpt::level())); + } else { + t!(poll.reregister(&evented, + mio::Token(token), + e, + mio::PollOpt::level())); + } + } + } + Err(_) => break, + } + } + + if event.token() == mio::Token(0) { + continue + } + + let token = event.token(); + let socket = token_map[&token.into()]; + let mut e = Events::new(); + if event.kind().is_readable() { + e.input(true); + } + if event.kind().is_writable() { + e.output(true); + } + if event.kind().is_error() { + e.error(true); 
+ } + let remaining = t!(m.action(socket, &e)); + if remaining == 0 { + running = false; + } + } + } + + let mut done = 0; + m.messages(|m| { + m.result().unwrap().unwrap(); + done += 1; + }); + assert_eq!(done, 1); + + let mut e = t!(m.remove(e)); + assert_eq!(t!(e.response_code()), 200); +} + +// Tests passing raw file descriptors to Multi::wait. The test is limited to Linux only as the +// semantics of the underlying poll(2) system call used by curl apparently differ on other +// platforms, making the test fail. +#[cfg(target_os = "linux")] +#[test] +fn waitfds() { + use std::fs::File; + use std::os::unix::io::AsRawFd; + use curl::multi::WaitFd; + + let filenames = ["/dev/null", "/dev/zero", "/dev/urandom"]; + let files: Vec = filenames.iter() + .map(|filename| File::open(filename).unwrap()) + .collect(); + let mut waitfds: Vec = files.iter().map(|f| { + let mut waitfd = WaitFd::new(); + waitfd.set_fd(f.as_raw_fd()); + waitfd.poll_on_read(true); + waitfd + }).collect(); + + let m = Multi::new(); + let events = t!(m.wait(&mut waitfds, Duration::from_secs(1))); + assert_eq!(events, 3); + for waitfd in waitfds { + assert!(waitfd.received_read()); + } +} diff -Nru cargo-0.17.0/vendor/curl-0.4.6/tests/post.rs cargo-0.19.0/vendor/curl-0.4.6/tests/post.rs --- cargo-0.17.0/vendor/curl-0.4.6/tests/post.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/curl-0.4.6/tests/post.rs 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,108 @@ +extern crate curl; + +use std::str; +use std::time::Duration; + +macro_rules! 
t { + ($e:expr) => (match $e { + Ok(e) => e, + Err(e) => panic!("{} failed with {:?}", stringify!($e), e), + }) +} + +use curl::easy::{Easy, Form}; + +use server::Server; +mod server; + +fn handle() -> Easy { + let mut e = Easy::new(); + t!(e.timeout(Duration::new(20, 0))); + return e +} + +#[test] +fn custom() { + let s = Server::new(); + s.receive("\ +POST / HTTP/1.1\r\n\ +Host: 127.0.0.1:$PORT\r\n\ +Accept: */*\r\n\ +Content-Length: 142\r\n\ +Expect: 100-continue\r\n\ +Content-Type: multipart/form-data; boundary=--[..]\r\n\ +\r\n\ +--[..]\r\n\ +Content-Disposition: form-data; name=\"foo\"\r\n\ +\r\n\ +1234\r\n\ +--[..]\r\n"); + s.send("HTTP/1.1 200 OK\r\n\r\n"); + + let mut handle = handle(); + let mut form = Form::new(); + t!(form.part("foo").contents(b"1234").add()); + t!(handle.url(&s.url("/"))); + t!(handle.httppost(form)); + t!(handle.perform()); +} + +#[test] +fn buffer() { + let s = Server::new(); + s.receive("\ +POST / HTTP/1.1\r\n\ +Host: 127.0.0.1:$PORT\r\n\ +Accept: */*\r\n\ +Content-Length: 181\r\n\ +Expect: 100-continue\r\n\ +Content-Type: multipart/form-data; boundary=--[..]\r\n\ +\r\n\ +--[..]\r\n\ +Content-Disposition: form-data; name=\"foo\"; filename=\"bar\"\r\n\ +Content-Type: foo/bar\r\n\ +\r\n\ +1234\r\n\ +--[..]\r\n"); + s.send("HTTP/1.1 200 OK\r\n\r\n"); + + let mut handle = handle(); + let mut form = Form::new(); + t!(form.part("foo") + .buffer("bar", b"1234".to_vec()) + .content_type("foo/bar") + .add()); + t!(handle.url(&s.url("/"))); + t!(handle.httppost(form)); + t!(handle.perform()); +} + +#[test] +fn file() { + let s = Server::new(); + s.receive("\ +POST / HTTP/1.1\r\n\ +Host: 127.0.0.1:$PORT\r\n\ +Accept: */*\r\n\ +Content-Length: 205\r\n\ +Expect: 100-continue\r\n\ +Content-Type: multipart/form-data; boundary=--[..]\r\n\ +\r\n\ +--[..]\r\n\ +Content-Disposition: form-data; name=\"foo\"; filename=\"formdata\"\r\n\ +Content-Type: application/octet-stream\r\n\ +\r\n\ +hello\n\ +\r\n\ +--[..]\r\n"); + s.send("HTTP/1.1 200 
OK\r\n\r\n"); + + let mut handle = handle(); + let mut form = Form::new(); + t!(form.part("foo") + .file("tests/formdata") + .add()); + t!(handle.url(&s.url("/"))); + t!(handle.httppost(form)); + t!(handle.perform()); +} diff -Nru cargo-0.17.0/vendor/curl-0.4.6/tests/server/mod.rs cargo-0.19.0/vendor/curl-0.4.6/tests/server/mod.rs --- cargo-0.17.0/vendor/curl-0.4.6/tests/server/mod.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/curl-0.4.6/tests/server/mod.rs 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,175 @@ +#![allow(dead_code)] + +use std::collections::HashSet; +use std::net::{TcpListener, SocketAddr, TcpStream}; +use std::io::prelude::*; +use std::thread; +use std::sync::mpsc::{Sender, Receiver, channel}; +use std::io::BufReader; + +pub struct Server { + messages: Option>, + addr: SocketAddr, + thread: Option>, +} + +enum Message { + Read(String), + Write(String), +} + +fn run(listener: &TcpListener, rx: &Receiver) { + let mut socket = BufReader::new(listener.accept().unwrap().0); + for msg in rx.iter() { + match msg { + Message::Read(ref expected) => { + let mut expected = &expected[..]; + let mut expected_headers = HashSet::new(); + while let Some(i) = expected.find("\n") { + let line = &expected[..i + 1]; + expected = &expected[i + 1..]; + expected_headers.insert(line); + if line == "\r\n" { + break + } + } + + let mut expected_len = None; + while expected_headers.len() > 0 { + let mut actual = String::new(); + t!(socket.read_line(&mut actual)); + if actual.starts_with("Content-Length") { + let len = actual.split(": ").skip(1).next().unwrap(); + expected_len = len.trim().parse().ok(); + } + // various versions of libcurl do different things here + if actual == "Proxy-Connection: Keep-Alive\r\n" { + continue + } + if expected_headers.remove(&actual[..]) { + continue + } + + let mut found = None; + for header in expected_headers.iter() { + if lines_match(header, &actual) { + found = Some(header.clone()); + break + } + } + if let Some(found) = 
found { + expected_headers.remove(&found); + continue + } + panic!("unexpected header: {:?} (remaining headers {:?})", + actual, expected_headers); + } + for header in expected_headers { + panic!("expected header but not found: {:?}", header); + } + + let mut line = String::new(); + let mut socket = match expected_len { + Some(amt) => socket.by_ref().take(amt), + None => socket.by_ref().take(expected.len() as u64), + }; + while socket.limit() > 0 { + line.truncate(0); + t!(socket.read_line(&mut line)); + if line.len() == 0 { + break + } + if expected.len() == 0 { + panic!("unexpected line: {:?}", line); + } + let i = expected.find("\n").unwrap_or(expected.len() - 1); + let expected_line = &expected[..i + 1]; + expected = &expected[i + 1..]; + if lines_match(expected_line, &line) { + continue + } + panic!("lines didn't match:\n\ + expected: {:?}\n\ + actual: {:?}\n", expected_line, line) + } + if expected.len() != 0 { + println!("didn't get expected data: {:?}", expected); + } + } + Message::Write(ref to_write) => { + t!(socket.get_mut().write_all(to_write.as_bytes())); + return + } + } + } + + let mut dst = Vec::new(); + t!(socket.read_to_end(&mut dst)); + assert!(dst.len() == 0); +} + +fn lines_match(expected: &str, mut actual: &str) -> bool { + for (i, part) in expected.split("[..]").enumerate() { + match actual.find(part) { + Some(j) => { + if i == 0 && j != 0 { + return false + } + actual = &actual[j + part.len()..]; + } + None => { + return false + } + } + } + actual.is_empty() || expected.ends_with("[..]") +} + +impl Server { + pub fn new() -> Server { + let listener = t!(TcpListener::bind("127.0.0.1:0")); + let addr = t!(listener.local_addr()); + let (tx, rx) = channel(); + let thread = thread::spawn(move || run(&listener, &rx)); + Server { + messages: Some(tx), + addr: addr, + thread: Some(thread), + } + } + + pub fn receive(&self, msg: &str) { + let msg = msg.replace("$PORT", &self.addr.port().to_string()); + self.msg(Message::Read(msg)); + } + + pub fn 
send(&self, msg: &str) { + let msg = msg.replace("$PORT", &self.addr.port().to_string()); + self.msg(Message::Write(msg)); + } + + fn msg(&self, msg: Message) { + t!(self.messages.as_ref().unwrap().send(msg)); + } + + pub fn addr(&self) -> &SocketAddr { + &self.addr + } + + pub fn url(&self, path: &str) -> String { + format!("http://{}{}", self.addr, path) + } +} + +impl Drop for Server { + fn drop(&mut self) { + drop(TcpStream::connect(&self.addr)); + drop(self.messages.take()); + let res = self.thread.take().unwrap().join(); + if !thread::panicking() { + t!(res); + } else if let Err(e) = res { + println!("child server thread also failed: {:?}", e); + } + } +} diff -Nru cargo-0.17.0/vendor/curl-0.4.6/.travis.yml cargo-0.19.0/vendor/curl-0.4.6/.travis.yml --- cargo-0.17.0/vendor/curl-0.4.6/.travis.yml 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/curl-0.4.6/.travis.yml 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,69 @@ +language: rust +sudo: required +dist: trusty +services: + - docker + +matrix: + include: + - os: linux + rust: stable + env: TARGET=x86_64-unknown-linux-gnu DOCKER=linux64 + - os: linux + rust: stable + env: TARGET=i686-unknown-linux-gnu DOCKER=linux32 + - os: linux + rust: stable + env: TARGET=x86_64-unknown-linux-musl DOCKER=musl + - os: linux + rust: stable + env: TARGET=x86_64-pc-windows-gnu NO_RUN=1 DOCKER=mingw + - os: linux + rust: stable + env: TARGET=x86_64-unknown-linux-gnu DOCKER=linux64-curl + - os: osx + rust: stable + env: TARGET=x86_64-apple-darwin + - os: osx + rust: stable + env: TARGET=i686-apple-darwin + - os: linux + rust: beta + env: TARGET=x86_64-unknown-linux-gnu DOCKER=linux64 + - os: linux + rust: nightly + env: TARGET=x86_64-unknown-linux-gnu DOCKER=linux64 +sudo: false +before_script: + - pip install 'travis-cargo<0.2' --user && export PATH=$HOME/.local/bin:$PATH + - curl https://static.rust-lang.org/rustup.sh | + sh -s -- --add-target=$TARGET --disable-sudo -y --prefix=`rustc --print sysroot` +script: + - 
curl --version + - cargo generate-lockfile + - cargo generate-lockfile --manifest-path systest/Cargo.toml + - if [ -z "$DOCKER" ]; then + sh ci/run.sh; + else + mkdir .cargo target; + docker build -t rust -f ci/Dockerfile-$DOCKER ci; + docker run + -w /src + -v `pwd`:/src:ro + -v `pwd`/target:/src/target + -v `pwd`/ci/.cargo:/src/.cargo:ro + -v `rustc --print sysroot`:/usr/local:ro + -e TARGET=$TARGET + -e NO_RUN=$NO_RUN + -e CARGO_TARGET_DIR=/src/target + -it rust + sh ci/run.sh; + fi +after_success: + - travis-cargo --only nightly doc-upload +notifications: + email: + on_success: never +env: + global: + secure: "j4son34/PmqogLMUHgcvOk+XtyUtcd0aAA8Sa/h4pyupw8AEM7+5DMMIrcrRh7ieKqmL2RSSGnYtYbd2b5yYroudypsqmQhK0StzrtPaftl/8zxw8liXzA9rat8MP0vuEAe5w9KLRdFKUCU7TzcYXcKttpbavqdNsJae+OFzHJc=" diff -Nru cargo-0.17.0/vendor/curl-sys-0.3.10/build.rs cargo-0.19.0/vendor/curl-sys-0.3.10/build.rs --- cargo-0.17.0/vendor/curl-sys-0.3.10/build.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/curl-sys-0.3.10/build.rs 2017-08-16 09:07:16.000000000 +0000 @@ -0,0 +1,320 @@ +extern crate pkg_config; +extern crate gcc; + +use std::env; +use std::ffi::OsString; +use std::fs; +use std::path::{PathBuf, Path}; +use std::process::Command; +use std::io::ErrorKind; + +macro_rules! t { + ($e:expr) => (match $e { + Ok(t) => t, + Err(e) => panic!("{} return the error {}", stringify!($e), e), + }) +} + +fn main() { + let target = env::var("TARGET").unwrap(); + let host = env::var("HOST").unwrap(); + let src = env::current_dir().unwrap(); + let dst = PathBuf::from(env::var_os("OUT_DIR").unwrap()); + let windows = target.contains("windows"); + + // OSX ships libcurl by default, so we just use that version + // unconditionally. 
+ if target.contains("apple") { + return println!("cargo:rustc-flags=-l curl"); + } + + // Illumos/Solaris requires explicit linking with libnsl + if target.contains("solaris") { + println!("cargo:rustc-flags=-l nsl"); + } + + // Next, fall back and try to use pkg-config if its available. + if !target.contains("windows") { + match pkg_config::find_library("libcurl") { + Ok(lib) => { + for path in lib.include_paths.iter() { + println!("cargo:include={}", path.display()); + } + return + } + Err(e) => println!("Couldn't find libcurl from \ + pkgconfig ({:?}), compiling it from source...", e), + } + } + + if !Path::new("curl/.git").exists() { + let _ = Command::new("git").args(&["submodule", "update", "--init"]) + .status(); + } + + println!("cargo:rustc-link-search={}/lib", dst.display()); + println!("cargo:rustc-link-lib=static=curl"); + println!("cargo:root={}", dst.display()); + println!("cargo:include={}/include", dst.display()); + if windows { + println!("cargo:rustc-link-lib=ws2_32"); + println!("cargo:rustc-link-lib=crypt32"); + } + + // MSVC builds are just totally different + if target.contains("msvc") { + return build_msvc(&target); + } + + let openssl_root = register_dep("OPENSSL"); + let zlib_root = register_dep("Z"); + let nghttp2_root = register_dep("NGHTTP2"); + + let cfg = gcc::Config::new(); + let compiler = cfg.get_compiler(); + + let _ = fs::create_dir(&dst.join("build")); + + let mut cmd = Command::new("sh"); + let mut cflags = OsString::new(); + for arg in compiler.args() { + cflags.push(arg); + cflags.push(" "); + } + + // Can't run ./configure directly on msys2 b/c we're handing in + // Windows-style paths (those starting with C:\), but it chokes on those. + // For that reason we build up a shell script with paths converted to + // posix versions hopefully... + // + // Also apparently the buildbots choke unless we manually set LD, who knows + // why?! 
+ cmd.env("CC", compiler.path()) + .env("CFLAGS", cflags) + .env("LD", &which("ld").unwrap()) + .env("VERBOSE", "1") + .current_dir(&dst.join("build")) + .arg(msys_compatible(&src.join("curl/configure"))); + + // For now this build script doesn't support paths with spaces in them. This + // is arguably a but in curl's configure script, but we could also try to + // paper over it by using a tmp directory which *doesn't* have spaces in it. + // As of now though that's not implemented so just give a nicer error for + // the time being. + let wants_space_error = windows && + (dst.to_str().map(|s| s.contains(" ")).unwrap_or(false) || + src.to_str().map(|s| s.contains(" ")).unwrap_or(false)); + if wants_space_error { + panic!("\n\nunfortunately ./configure of libcurl is known to \ + fail if there's a space in the path to the current \ + directory\n\n\ + there's a space in either\n {}\n {}\nand this will cause the \ + build to fail\n\n\ + the MSVC build should work with a directory that has \ + spaces in it, and it would also work to move this to a \ + different directory without spaces\n\n", + src.display(), dst.display()) + } + + if windows { + cmd.arg("--with-winssl"); + } else { + cmd.arg("--without-ca-bundle"); + cmd.arg("--without-ca-path"); + } + if let Some(root) = openssl_root { + cmd.arg(format!("--with-ssl={}", msys_compatible(&root))); + } + if let Some(root) = zlib_root { + cmd.arg(format!("--with-zlib={}", msys_compatible(&root))); + } + cmd.arg("--enable-static=yes"); + cmd.arg("--enable-shared=no"); + match &env::var("PROFILE").unwrap()[..] 
{ + "bench" | "release" => { + cmd.arg("--enable-optimize"); + } + _ => { + cmd.arg("--enable-debug"); + cmd.arg("--disable-optimize"); + } + } + cmd.arg(format!("--prefix={}", msys_compatible(&dst))); + + if target != host && + (!target.contains("windows") || !host.contains("windows")) { + // NOTE GNU terminology + // BUILD = machine where we are (cross) compiling curl + // HOST = machine where the compiled curl will be used + // TARGET = only relevant when compiling compilers + if target.contains("windows") { + // curl's configure can't parse `-windows-` triples when used + // as `--host`s. In those cases we use this combination of + // `host` and `target` that appears to do the right thing. + cmd.arg(format!("--host={}", host)); + cmd.arg(format!("--target={}", target)); + } else { + cmd.arg(format!("--build={}", host)); + cmd.arg(format!("--host={}", target)); + } + } + + if let Some(root) = nghttp2_root { + cmd.arg(format!("--with-nghttp2={}", msys_compatible(&root))); + } else { + cmd.arg("--without-nghttp2"); + } + + cmd.arg("--without-librtmp"); + cmd.arg("--without-libidn"); + cmd.arg("--without-libssh2"); + cmd.arg("--disable-ldap"); + cmd.arg("--disable-ldaps"); + cmd.arg("--disable-ftp"); + cmd.arg("--disable-rtsp"); + cmd.arg("--disable-dict"); + cmd.arg("--disable-telnet"); + cmd.arg("--disable-tftp"); + cmd.arg("--disable-pop3"); + cmd.arg("--disable-imap"); + cmd.arg("--disable-smtp"); + cmd.arg("--disable-gopher"); + cmd.arg("--disable-manual"); + cmd.arg("--disable-smb"); + cmd.arg("--disable-sspi"); + + run(&mut cmd, "sh"); + run(Command::new(make()) + .arg(&format!("-j{}", env::var("NUM_JOBS").unwrap())) + .current_dir(&dst.join("build")), "make"); + run(Command::new(make()) + .arg("install") + .current_dir(&dst.join("build")), "make"); +} + +fn run(cmd: &mut Command, program: &str) { + println!("running: {:?}", cmd); + let status = match cmd.status() { + Ok(status) => status, + Err(ref e) if e.kind() == ErrorKind::NotFound => { + 
fail(&format!("failed to execute command: {}\nis `{}` not installed?", + e, program)); + } + Err(e) => fail(&format!("failed to execute command: {}", e)), + }; + if !status.success() { + fail(&format!("command did not execute successfully, got: {}", status)); + } +} + +fn fail(s: &str) -> ! { + panic!("\n{}\n\nbuild script failed, must exit now", s) +} + +fn make() -> &'static str { + if cfg!(target_os = "freebsd") {"gmake"} else {"make"} +} + +fn which(cmd: &str) -> Option { + let cmd = format!("{}{}", cmd, env::consts::EXE_SUFFIX); + let paths = env::var_os("PATH").unwrap(); + env::split_paths(&paths).map(|p| p.join(&cmd)).find(|p| { + fs::metadata(p).is_ok() + }) +} + +fn msys_compatible(path: &Path) -> String { + let path = path.to_str().unwrap(); + if !cfg!(windows) { + return path.to_string() + } + path.replace("C:\\", "/c/") + .replace("\\", "/") +} + +fn register_dep(dep: &str) -> Option { + if let Some(s) = env::var_os(&format!("DEP_{}_ROOT", dep)) { + prepend("PKG_CONFIG_PATH", Path::new(&s).join("lib/pkgconfig")); + return Some(s.into()) + } + if let Some(s) = env::var_os(&format!("DEP_{}_INCLUDE", dep)) { + let root = Path::new(&s).parent().unwrap(); + env::set_var(&format!("DEP_{}_ROOT", dep), root); + let path = root.join("lib/pkgconfig"); + if path.exists() { + prepend("PKG_CONFIG_PATH", path); + return Some(root.to_path_buf()) + } + } + + return None; + + fn prepend(var: &str, val: PathBuf) { + let prefix = env::var(var).unwrap_or(String::new()); + let mut v = vec![val]; + v.extend(env::split_paths(&prefix)); + env::set_var(var, &env::join_paths(v).unwrap()); + } +} + +fn build_msvc(target: &str) { + let cmd = gcc::windows_registry::find(target, "nmake.exe"); + let mut cmd = cmd.unwrap_or(Command::new("nmake.exe")); + let src = env::current_dir().unwrap(); + let dst = PathBuf::from(env::var_os("OUT_DIR").unwrap()); + let machine = if target.starts_with("x86_64") { + "x64" + } else if target.starts_with("i686") { + "x86" + } else { + panic!("unknown 
msvc target: {}", target); + }; + + t!(fs::create_dir_all(dst.join("include/curl"))); + t!(fs::create_dir_all(dst.join("lib"))); + + cmd.current_dir(src.join("curl/winbuild")); + cmd.arg("/f").arg("Makefile.vc") + .arg("MODE=static") + .arg("ENABLE_IDN=yes") + .arg("DEBUG=no") + .arg("GEN_PDB=no") + .arg("ENABLE_WINSSL=yes") + .arg("ENABLE_SSPI=yes") + .arg(format!("MACHINE={}", machine)); + + let features = env::var("CARGO_CFG_TARGET_FEATURE") + .unwrap_or(String::new()); + if features.contains("crt-static") { + cmd.arg("RTLIBCFG=static"); + } + + if let Some(inc) = env::var_os("DEP_Z_ROOT") { + let inc = PathBuf::from(inc); + let mut s = OsString::from("WITH_DEVEL="); + s.push(&inc); + cmd.arg("WITH_ZLIB=static").arg(s); + + // the build system for curl expects this library to be called + // zlib_a.lib, so make sure it's named correctly (where libz-sys just + // produces zlib.lib) + let _ = fs::remove_file(&inc.join("lib/zlib_a.lib")); + t!(fs::hard_link(inc.join("lib/zlib.lib"), inc.join("lib/zlib_a.lib"))); + } + run(&mut cmd, "nmake"); + + let name = format!("libcurl-vc-{}-release-static-zlib-static-\ + ipv6-sspi-winssl", machine); + let libs = src.join("curl/builds").join(name); + + t!(fs::copy(libs.join("lib/libcurl_a.lib"), dst.join("lib/curl.lib"))); + for f in t!(fs::read_dir(libs.join("include/curl"))) { + let path = t!(f).path(); + let dst = dst.join("include/curl").join(path.file_name().unwrap()); + t!(fs::copy(path, dst)); + } + t!(fs::remove_dir_all(src.join("curl/builds"))); + println!("cargo:rustc-link-lib=wldap32"); + println!("cargo:rustc-link-lib=advapi32"); + println!("cargo:rustc-link-lib=normaliz"); +} diff -Nru cargo-0.17.0/vendor/curl-sys-0.3.10/.cargo-checksum.json cargo-0.19.0/vendor/curl-sys-0.3.10/.cargo-checksum.json --- cargo-0.17.0/vendor/curl-sys-0.3.10/.cargo-checksum.json 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/curl-sys-0.3.10/.cargo-checksum.json 2017-08-16 09:07:20.000000000 +0000 @@ -0,0 +1 @@ +{"files": 
{"build.rs": "9802da9e5173613e115a6663e6c087dc7e7cafae11a26d33515b2ad883b15d37", "Cargo.toml": "6fa4fc3aacdcb52616955d6b2e9113b95158e97bc5cdeaefac4ae894af914d5b", ".cargo-ok": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", "lib.rs": "a99fbe1e9dc3e378061ec92e625b31288ecc929b477827a13d085c1116729d89"}, "package": "c0d909dc402ae80b6f7b0118c039203436061b9d9a3ca5d2c2546d93e0a61aaa"} \ No newline at end of file diff -Nru cargo-0.17.0/vendor/curl-sys-0.3.10/Cargo.toml cargo-0.19.0/vendor/curl-sys-0.3.10/Cargo.toml --- cargo-0.17.0/vendor/curl-sys-0.3.10/Cargo.toml 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/curl-sys-0.3.10/Cargo.toml 2017-08-16 09:07:16.000000000 +0000 @@ -0,0 +1,34 @@ +[package] +name = "curl-sys" +version = "0.3.10" +authors = ["Carl Lerche ", + "Alex Crichton "] +links = "curl" +build = "build.rs" +license = "MIT" +repository = "https://github.com/alexcrichton/curl-rust" +description = "Native bindings to the libcurl library" +documentation = "https://docs.rs/curl-sys" +categories = ["external-ffi-bindings"] + +[badges] +travis-ci = { repository = "alexcrichton/curl-rust" } +appveyor = { repository = "alexcrichton/curl-rust" } + +[build-dependencies] +pkg-config = "0.3" +gcc = "0.3.10" + +[lib] +name = "curl_sys" +path = "lib.rs" + +[dependencies] +libz-sys = ">= 0" +libc = "0.2" + +[target."cfg(all(unix, not(target_os = \"macos\")))".dependencies] +openssl-sys = "0.9" + +[target."cfg(windows)".dependencies] +winapi = "0.2" diff -Nru cargo-0.17.0/vendor/curl-sys-0.3.10/lib.rs cargo-0.19.0/vendor/curl-sys-0.3.10/lib.rs --- cargo-0.17.0/vendor/curl-sys-0.3.10/lib.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/curl-sys-0.3.10/lib.rs 2017-08-16 09:07:16.000000000 +0000 @@ -0,0 +1,1044 @@ +#![allow(bad_style)] +#![doc(html_root_url = "https://docs.rs/curl-sys/0.3")] + +extern crate libc; +#[cfg(not(target_env = "msvc"))] +extern crate libz_sys; +#[cfg(all(unix, not(target_os = "macos")))] +extern crate 
openssl_sys; +#[cfg(windows)] +extern crate winapi; + +use libc::{c_int, c_char, c_uint, c_short, c_long, c_double, c_void, size_t, time_t}; +use libc::c_ulong; + +#[cfg(unix)] +use libc::fd_set; +#[cfg(windows)] +use winapi::fd_set; + +#[cfg(target_env = "msvc")] +#[doc(hidden)] +pub type __enum_ty = libc::c_int; +#[cfg(not(target_env = "msvc"))] +#[doc(hidden)] +pub type __enum_ty = libc::c_uint; + +pub type CURLINFO = __enum_ty; +pub type CURLoption = __enum_ty; +pub type CURLcode = __enum_ty; +pub type CURLversion = __enum_ty; +pub type curl_off_t = i64; + +pub enum CURL {} + +#[cfg(unix)] +pub type curl_socket_t = libc::c_int; +#[cfg(unix)] +pub const CURL_SOCKET_BAD: curl_socket_t = -1; +#[cfg(all(windows, target_pointer_width = "32"))] +pub type curl_socket_t = libc::c_uint; +#[cfg(all(windows, target_pointer_width = "64"))] +pub type curl_socket_t = u64; +#[cfg(windows)] +pub const CURL_SOCKET_BAD: curl_socket_t = !0; + +pub enum curl_httppost { + // Note that this changed in some versions of libcurl, so we currently don't + // bind the fields as they're apparently not stable. 
+ // pub next: *mut curl_httppost, + // pub name: *mut c_char, + // pub namelength: c_long, + // pub contents: *mut c_char, + // pub contentslength: c_long, + // pub buffer: *mut c_char, + // pub bufferlength: c_long, + // pub contenttype: *mut c_char, + // pub contentheader: *mut curl_slist, + // pub more: *mut curl_httppost, + // pub flags: c_long, + // pub showfilename: *mut c_char, + // pub userp: *mut c_void, +} + +// pub const HTTPPOST_FILENAME: c_long = 1 << 0; +// pub const HTTPPOST_READFILE: c_long = 1 << 1; +// pub const HTTPPOST_PTRNAME: c_long = 1 << 2; +// pub const HTTPPOST_PTRCONTENTS: c_long = 1 << 3; +// pub const HTTPPOST_BUFFER: c_long = 1 << 4; +// pub const HTTPPOST_PTRBUFFER: c_long = 1 << 5; +// pub const HTTPPOST_CALLBACK: c_long = 1 << 6; + +pub type curl_progress_callback = extern fn(*mut c_void, + c_double, + c_double, + c_double, + c_double) -> c_int; +// pub type curl_xferinfo_callback = extern fn(*mut c_void, +// curl_off_t, +// curl_off_t, +// curl_off_t, +// curl_off_t) -> c_int; + +pub const CURL_WRITEFUNC_PAUSE: size_t = 0x10000001; + +pub type curl_write_callback = extern fn(*mut c_char, + size_t, + size_t, + *mut c_void) -> size_t; + +pub type curlfiletype = __enum_ty; +pub const CURLFILETYPE_FILE: curlfiletype = 0; +pub const CURLFILETYPE_DIRECTORY: curlfiletype = 1; +pub const CURLFILETYPE_SYMLINK: curlfiletype = 2; +pub const CURLFILETYPE_DEVICE_BLOCK: curlfiletype = 3; +pub const CURLFILETYPE_DEVICE_CHAR: curlfiletype = 4; +pub const CURLFILETYPE_NAMEDPIPE: curlfiletype = 5; +pub const CURLFILETYPE_SOCKET: curlfiletype = 6; +pub const CURLFILETYPE_DOOR: curlfiletype = 7; +pub const CURLFILETYPE_UNKNOWN: curlfiletype = 8; + +pub const CURLFINFOFLAG_KNOWN_FILENAME: c_uint = 1 << 0; +pub const CURLFINFOFLAG_KNOWN_FILETYPE: c_uint = 1 << 1; +pub const CURLFINFOFLAG_KNOWN_TIME: c_uint = 1 << 2; +pub const CURLFINFOFLAG_KNOWN_PERM: c_uint = 1 << 3; +pub const CURLFINFOFLAG_KNOWN_UID: c_uint = 1 << 4; +pub const 
CURLFINFOFLAG_KNOWN_GID: c_uint = 1 << 5; +pub const CURLFINFOFLAG_KNOWN_SIZE: c_uint = 1 << 6; +pub const CURLFINFOFLAG_KNOWN_HLINKCOUNT: c_uint = 1 << 7; + +#[repr(C)] +pub struct curl_fileinfo { + pub filename: *mut c_char, + pub filetype: curlfiletype, + pub time: time_t, + pub perm: c_uint, + pub uid: c_int, + pub gid: c_int, + pub size: curl_off_t, + pub hardlinks: c_long, + + pub strings_time: *mut c_char, + pub strings_perm: *mut c_char, + pub strings_user: *mut c_char, + pub strings_group: *mut c_char, + pub strings_target: *mut c_char, + + pub flags: c_uint, + pub b_data: *mut c_char, + pub b_size: size_t, + pub b_used: size_t, +} + +pub const CURL_CHUNK_BGN_FUNC_OK: c_long = 0; +pub const CURL_CHUNK_BGN_FUNC_FAIL: c_long = 1; +pub const CURL_CHUNK_BGN_FUNC_SKIP: c_long = 2; +pub type curl_chunk_bgn_callback = extern fn(*const c_void, + *mut c_void, + c_int) -> c_long; + +pub const CURL_CHUNK_END_FUNC_OK: c_long = 0; +pub const CURL_CHUNK_END_FUNC_FAIL: c_long = 1; +pub type curl_chunk_end_callback = extern fn(*mut c_void) -> c_long; + +pub const CURL_FNMATCHFUNC_MATCH: c_int = 0; +pub const CURL_FNMATCHFUNC_NOMATCH: c_int = 1; +pub const CURL_FNMATCHFUNC_FAIL: c_int = 2; +pub type curl_fnmatch_callback = extern fn(*mut c_void, + *const c_char, + *const c_char) -> c_int; + +pub const CURL_SEEKFUNC_OK: c_int = 0; +pub const CURL_SEEKFUNC_FAIL: c_int = 1; +pub const CURL_SEEKFUNC_CANTSEEK: c_int = 2; +pub type curl_seek_callback = extern fn(*mut c_void, + curl_off_t, + c_int) -> c_int; + +pub const CURL_READFUNC_ABORT: size_t = 0x10000000; +pub const CURL_READFUNC_PAUSE: size_t = 0x10000001; +pub type curl_read_callback = extern fn(*mut c_char, + size_t, + size_t, + *mut c_void) -> size_t; + +// pub type curlsocktype = __enum_ty; +// pub const CURLSOCKTYPE_IPCXN: curlsocktype = 0; +// pub const CURLSOCKTYPE_ACCEPT: curlsocktype = 1; +// pub const CURL_SOCKOPT_OK: c_int = 0; +// pub const CURL_SOCKOPT_ERROR: c_int = 1; +// pub const 
CURL_SOCKOPT_ALREADY_CONNECTED: c_int = 2; +// pub type curl_sockopt_callback = extern fn(*mut c_void, +// curl_socket_t, +// curlsocktype) -> c_int; + +// TODO: sort out libc::sockaddr on windows +// #[repr(C)] +// pub struct curl_sockaddr { +// pub family: c_int, +// pub socktype: c_int, +// pub protocol: c_int, +// pub addrlen: c_uint, +// pub addr: libc::sockaddr, +// } +// +// pub type curl_opensocket_callback = extern fn(*mut c_void, +// curlsocktype, +// *mut curl_sockaddr) -> curl_socket_t; + +pub type curlioerr = __enum_ty; +pub const CURLIOE_OK: curlioerr = 0; +pub const CURLIOE_UNKNOWNCMD: curlioerr = 1; +pub const CURLIOE_FAILRESTART: curlioerr = 2; + +pub type curliocmd = __enum_ty; +pub const CURLIOCMD_NOP: curliocmd = 0; +pub const CURLIOCMD_RESTARTREAD: curliocmd = 1; + +pub type curl_ioctl_callback = extern fn(*mut CURL, c_int, *mut c_void) -> curlioerr; + +pub type curl_malloc_callback = extern fn(size_t) -> *mut c_void; +pub type curl_free_callback = extern fn(*mut c_void); +pub type curl_realloc_callback = extern fn(*mut c_void, size_t) -> *mut c_void; +pub type curl_strdup_callback = extern fn(*const c_char) -> *mut c_char; +pub type curl_calloc_callback = extern fn(size_t, size_t) -> *mut c_void; + +pub type curl_infotype = __enum_ty; +pub const CURLINFO_TEXT: curl_infotype = 0; +pub const CURLINFO_HEADER_IN: curl_infotype = 1; +pub const CURLINFO_HEADER_OUT: curl_infotype = 2; +pub const CURLINFO_DATA_IN: curl_infotype = 3; +pub const CURLINFO_DATA_OUT: curl_infotype = 4; +pub const CURLINFO_SSL_DATA_IN: curl_infotype = 5; +pub const CURLINFO_SSL_DATA_OUT: curl_infotype = 6; + +pub type curl_debug_callback = extern fn(*mut CURL, + curl_infotype, + *mut c_char, + size_t, + *mut c_void) -> c_int; + +pub const CURLE_OK: CURLcode = 0; +pub const CURLE_UNSUPPORTED_PROTOCOL: CURLcode = 1; +pub const CURLE_FAILED_INIT: CURLcode = 2; +pub const CURLE_URL_MALFORMAT: CURLcode = 3; +// pub const CURLE_NOT_BUILT_IN: CURLcode = 4; +pub const 
CURLE_COULDNT_RESOLVE_PROXY: CURLcode = 5; +pub const CURLE_COULDNT_RESOLVE_HOST: CURLcode = 6; +pub const CURLE_COULDNT_CONNECT: CURLcode = 7; +pub const CURLE_FTP_WEIRD_SERVER_REPLY: CURLcode = 8; +pub const CURLE_REMOTE_ACCESS_DENIED: CURLcode = 9; +// pub const CURLE_FTP_ACCEPT_FAILED: CURLcode = 10; +pub const CURLE_FTP_WEIRD_PASS_REPLY: CURLcode = 11; +// pub const CURLE_FTP_ACCEPT_TIMEOUT: CURLcode = 12; +pub const CURLE_FTP_WEIRD_PASV_REPLY: CURLcode = 13; +pub const CURLE_FTP_WEIRD_227_FORMAT: CURLcode = 14; +pub const CURLE_FTP_CANT_GET_HOST: CURLcode = 15; +pub const CURLE_OBSOLETE16: CURLcode = 16; +pub const CURLE_FTP_COULDNT_SET_TYPE: CURLcode = 17; +pub const CURLE_PARTIAL_FILE: CURLcode = 18; +pub const CURLE_FTP_COULDNT_RETR_FILE: CURLcode = 19; +pub const CURLE_OBSOLETE20: CURLcode = 20; +pub const CURLE_QUOTE_ERROR: CURLcode = 21; +pub const CURLE_HTTP_RETURNED_ERROR: CURLcode = 22; +pub const CURLE_WRITE_ERROR: CURLcode = 23; +pub const CURLE_OBSOLETE24: CURLcode = 24; +pub const CURLE_UPLOAD_FAILED: CURLcode = 25; +pub const CURLE_READ_ERROR: CURLcode = 26; +pub const CURLE_OUT_OF_MEMORY: CURLcode = 27; +pub const CURLE_OPERATION_TIMEDOUT: CURLcode = 28; +pub const CURLE_OBSOLETE29: CURLcode = 29; +pub const CURLE_FTP_PORT_FAILED: CURLcode = 30; +pub const CURLE_FTP_COULDNT_USE_REST: CURLcode = 31; +pub const CURLE_OBSOLETE32: CURLcode = 32; +pub const CURLE_RANGE_ERROR: CURLcode = 33; +pub const CURLE_HTTP_POST_ERROR: CURLcode = 34; +pub const CURLE_SSL_CONNECT_ERROR: CURLcode = 35; +pub const CURLE_BAD_DOWNLOAD_RESUME: CURLcode = 36; +pub const CURLE_FILE_COULDNT_READ_FILE: CURLcode = 37; +pub const CURLE_LDAP_CANNOT_BIND: CURLcode = 38; +pub const CURLE_LDAP_SEARCH_FAILED: CURLcode = 39; +pub const CURLE_OBSOLETE40: CURLcode = 40; +pub const CURLE_FUNCTION_NOT_FOUND: CURLcode = 41; +pub const CURLE_ABORTED_BY_CALLBACK: CURLcode = 42; +pub const CURLE_BAD_FUNCTION_ARGUMENT: CURLcode = 43; +pub const CURLE_OBSOLETE44: CURLcode = 44; +pub const 
CURLE_INTERFACE_FAILED: CURLcode = 45; +pub const CURLE_OBSOLETE46: CURLcode = 46; +pub const CURLE_TOO_MANY_REDIRECTS : CURLcode = 47; +pub const CURLE_UNKNOWN_OPTION: CURLcode = 48; +pub const CURLE_TELNET_OPTION_SYNTAX : CURLcode = 49; +pub const CURLE_OBSOLETE50: CURLcode = 50; +pub const CURLE_PEER_FAILED_VERIFICATION: CURLcode = 51; +pub const CURLE_GOT_NOTHING: CURLcode = 52; +pub const CURLE_SSL_ENGINE_NOTFOUND: CURLcode = 53; +pub const CURLE_SSL_ENGINE_SETFAILED: CURLcode = 54; +pub const CURLE_SEND_ERROR: CURLcode = 55; +pub const CURLE_RECV_ERROR: CURLcode = 56; +pub const CURLE_OBSOLETE57: CURLcode = 57; +pub const CURLE_SSL_CERTPROBLEM: CURLcode = 58; +pub const CURLE_SSL_CIPHER: CURLcode = 59; +pub const CURLE_SSL_CACERT: CURLcode = 60; +pub const CURLE_BAD_CONTENT_ENCODING: CURLcode = 61; +pub const CURLE_LDAP_INVALID_URL: CURLcode = 62; +pub const CURLE_FILESIZE_EXCEEDED: CURLcode = 63; +pub const CURLE_USE_SSL_FAILED: CURLcode = 64; +pub const CURLE_SEND_FAIL_REWIND: CURLcode = 65; +pub const CURLE_SSL_ENGINE_INITFAILED: CURLcode = 66; +pub const CURLE_LOGIN_DENIED: CURLcode = 67; +pub const CURLE_TFTP_NOTFOUND: CURLcode = 68; +pub const CURLE_TFTP_PERM: CURLcode = 69; +pub const CURLE_REMOTE_DISK_FULL: CURLcode = 70; +pub const CURLE_TFTP_ILLEGAL: CURLcode = 71; +pub const CURLE_TFTP_UNKNOWNID: CURLcode = 72; +pub const CURLE_REMOTE_FILE_EXISTS: CURLcode = 73; +pub const CURLE_TFTP_NOSUCHUSER: CURLcode = 74; +pub const CURLE_CONV_FAILED: CURLcode = 75; +pub const CURLE_CONV_REQD: CURLcode = 76; +pub const CURLE_SSL_CACERT_BADFILE: CURLcode = 77; +pub const CURLE_REMOTE_FILE_NOT_FOUND: CURLcode = 78; +pub const CURLE_SSH: CURLcode = 79; +pub const CURLE_SSL_SHUTDOWN_FAILED: CURLcode = 80; +pub const CURLE_AGAIN: CURLcode = 81; +pub const CURLE_SSL_CRL_BADFILE: CURLcode = 82; +pub const CURLE_SSL_ISSUER_ERROR: CURLcode = 83; +pub const CURLE_FTP_PRET_FAILED: CURLcode = 84; +pub const CURLE_RTSP_CSEQ_ERROR: CURLcode = 85; +pub const 
CURLE_RTSP_SESSION_ERROR: CURLcode = 86; +pub const CURLE_FTP_BAD_FILE_LIST: CURLcode = 87; +pub const CURLE_CHUNK_FAILED: CURLcode = 88; +// pub const CURLE_NO_CONNECTION_AVAILABLE: CURLcode = 89; + +pub type curl_conv_callback = extern fn(*mut c_char, size_t) -> CURLcode; +pub type curl_ssl_ctx_callback = extern fn(*mut CURL, + *mut c_void, + *mut c_void) -> CURLcode; + +pub type curl_proxytype = __enum_ty; +pub const CURLPROXY_HTTP: curl_proxytype = 0; +pub const CURLPROXY_HTTP_1_0: curl_proxytype = 1; +pub const CURLPROXY_SOCKS4: curl_proxytype = 4; +pub const CURLPROXY_SOCKS5: curl_proxytype = 5; +pub const CURLPROXY_SOCKS4A: curl_proxytype = 6; +pub const CURLPROXY_SOCKS5_HOSTNAME: curl_proxytype = 7; + +pub const CURLAUTH_NONE: c_ulong = 0; +pub const CURLAUTH_BASIC: c_ulong = 1 << 0; +pub const CURLAUTH_DIGEST: c_ulong = 1 << 1; +pub const CURLAUTH_GSSNEGOTIATE: c_ulong = 1 << 2; +pub const CURLAUTH_NTLM: c_ulong = 1 << 3; +pub const CURLAUTH_DIGEST_IE: c_ulong = 1 << 4; +pub const CURLAUTH_NTLM_WB: c_ulong = 1 << 5; +// pub const CURLAUTH_ONLY: c_ulong = 1 << 31; +pub const CURLAUTH_ANY: c_ulong = !CURLAUTH_DIGEST_IE; +pub const CURLAUTH_ANYSAFE: c_ulong = !(CURLAUTH_BASIC | CURLAUTH_DIGEST_IE); + +// pub const CURLSSH_AUTH_ANY: c_ulong = !0; +// pub const CURLSSH_AUTH_NONE: c_ulong = 0; +// pub const CURLSSH_AUTH_PUBLICKEY: c_ulong = 1 << 0; +// pub const CURLSSH_AUTH_PASSWORD: c_ulong = 1 << 1; +// pub const CURLSSH_AUTH_HOST: c_ulong = 1 << 2; +// pub const CURLSSH_AUTH_KEYBOARD: c_ulong = 1 << 3; +// pub const CURLSSH_AUTH_AGENT: c_ulong = 1 << 4; +// pub const CURLSSH_AUTH_DEFAULT: c_ulong = CURLSSH_AUTH_ANY; + +pub const CURLGSSAPI_DELEGATION_NONE: c_ulong = 0; +pub const CURLGSSAPI_DELEGATION_POLICY_FLAG: c_ulong = 1 << 0; +pub const CURLGSSAPI_DELEGATION_FLAG: c_ulong = 1 << 1; + +// pub type curl_khtype = __enum_ty; +// pub const CURLKHTYPE_UNKNOWN: curl_khtype = 0; +// pub const CURLKHTYPE_RSA1: curl_khtype = 1; +// pub const CURLKHTYPE_RSA: 
curl_khtype = 2; +// pub const CURLKHTYPE_DSS: curl_khtype = 3; + +// #[repr(C)] +// pub struct curl_khkey { +// pub key: *const c_char, +// pub len: size_t, +// pub keytype: curl_khtype, +// } + +// pub type curl_khstat = __enum_ty; +// pub const CURLKHSTAT_FINE_ADD_TO_FILE: curl_khstat = 0; +// pub const CURLKHSTAT_FINE: curl_khstat = 1; +// pub const CURLKHSTAT_REJECT: curl_khstat = 2; +// pub const CURLKHSTAT_DEFER: curl_khstat = 3; +// +// pub type curl_khmatch = __enum_ty; +// pub const CURLKHMATCH_OK: curl_khmatch = 0; +// pub const CURLKHMATCH_MISMATCH: curl_khmatch = 1; +// pub const CURLKHMATCH_MISSING: curl_khmatch = 2; + +// pub type curl_sshkeycallback = extern fn(*mut CURL, +// *const curl_khkey, +// *const curl_khkey, +// curl_khmatch, +// *mut c_void) -> c_int; + +pub const CURL_NETRC_IGNORED: c_ulong = 0; +pub const CURL_NETRC_OPTIONAL: c_ulong = 1; +pub const CURL_NETRC_REQUIRED: c_ulong = 2; + +pub type curl_usessl = __enum_ty; +pub const CURLUSESSL_NONE: curl_usessl = 0; +pub const CURLUSESSL_TRY: curl_usessl = 1; +pub const CURLUSESSL_CONTROL: curl_usessl = 2; +pub const CURLUSESSL_ALL: curl_usessl = 3; + +pub const CURLPROTO_HTTP: c_int = 1 << 0; +pub const CURLPROTO_HTTPS: c_int = 1 << 1; +pub const CURLPROTO_FILE: c_int = 1 << 10; + +pub const CURLOPTTYPE_LONG: CURLoption = 0; +pub const CURLOPTTYPE_OBJECTPOINT: CURLoption = 10_000; +pub const CURLOPTTYPE_FUNCTIONPOINT: CURLoption = 20_000; +pub const CURLOPTTYPE_OFF_T: CURLoption = 30_000; + +pub const CURLOPT_FILE: CURLoption = CURLOPTTYPE_OBJECTPOINT + 1; +pub const CURLOPT_URL: CURLoption = CURLOPTTYPE_OBJECTPOINT + 2; +pub const CURLOPT_PORT: CURLoption = CURLOPTTYPE_LONG + 3; +pub const CURLOPT_PROXY: CURLoption = CURLOPTTYPE_OBJECTPOINT + 4; +pub const CURLOPT_USERPWD: CURLoption = CURLOPTTYPE_OBJECTPOINT + 5; +pub const CURLOPT_PROXYUSERPWD: CURLoption = CURLOPTTYPE_OBJECTPOINT + 6; +pub const CURLOPT_RANGE: CURLoption = CURLOPTTYPE_OBJECTPOINT + 7; +pub const CURLOPT_INFILE: 
CURLoption = CURLOPTTYPE_OBJECTPOINT + 9; +pub const CURLOPT_ERRORBUFFER: CURLoption = CURLOPTTYPE_OBJECTPOINT + 10; +pub const CURLOPT_WRITEFUNCTION: CURLoption = CURLOPTTYPE_FUNCTIONPOINT + 11; +pub const CURLOPT_READFUNCTION: CURLoption = CURLOPTTYPE_FUNCTIONPOINT + 12; +pub const CURLOPT_TIMEOUT: CURLoption = CURLOPTTYPE_LONG + 13; +pub const CURLOPT_INFILESIZE: CURLoption = CURLOPTTYPE_LONG + 14; +pub const CURLOPT_POSTFIELDS: CURLoption = CURLOPTTYPE_OBJECTPOINT + 15; +pub const CURLOPT_REFERER: CURLoption = CURLOPTTYPE_OBJECTPOINT + 16; +pub const CURLOPT_FTPPORT: CURLoption = CURLOPTTYPE_OBJECTPOINT + 17; +pub const CURLOPT_USERAGENT: CURLoption = CURLOPTTYPE_OBJECTPOINT + 18; +pub const CURLOPT_LOW_SPEED_LIMIT: CURLoption = CURLOPTTYPE_LONG + 19; +pub const CURLOPT_LOW_SPEED_TIME: CURLoption = CURLOPTTYPE_LONG + 20; +pub const CURLOPT_RESUME_FROM: CURLoption = CURLOPTTYPE_LONG + 21; +pub const CURLOPT_COOKIE: CURLoption = CURLOPTTYPE_OBJECTPOINT + 22; +pub const CURLOPT_HTTPHEADER: CURLoption = CURLOPTTYPE_OBJECTPOINT + 23; +pub const CURLOPT_HTTPPOST: CURLoption = CURLOPTTYPE_OBJECTPOINT + 24; +pub const CURLOPT_SSLCERT: CURLoption = CURLOPTTYPE_OBJECTPOINT + 25; +pub const CURLOPT_KEYPASSWD: CURLoption = CURLOPTTYPE_OBJECTPOINT + 26; +pub const CURLOPT_CRLF: CURLoption = CURLOPTTYPE_LONG + 27; +pub const CURLOPT_QUOTE: CURLoption = CURLOPTTYPE_OBJECTPOINT + 28; +pub const CURLOPT_WRITEHEADER: CURLoption = CURLOPTTYPE_OBJECTPOINT + 29; +pub const CURLOPT_COOKIEFILE: CURLoption = CURLOPTTYPE_OBJECTPOINT + 31; +pub const CURLOPT_SSLVERSION: CURLoption = CURLOPTTYPE_LONG + 32; +pub const CURLOPT_TIMECONDITION: CURLoption = CURLOPTTYPE_LONG + 33; +pub const CURLOPT_TIMEVALUE: CURLoption = CURLOPTTYPE_LONG + 34; +pub const CURLOPT_CUSTOMREQUEST: CURLoption = CURLOPTTYPE_OBJECTPOINT + 36; +pub const CURLOPT_STDERR: CURLoption = CURLOPTTYPE_OBJECTPOINT + 37; +pub const CURLOPT_POSTQUOTE: CURLoption = CURLOPTTYPE_OBJECTPOINT + 39; +pub const CURLOPT_WRITEINFO: 
CURLoption = CURLOPTTYPE_OBJECTPOINT + 40; +pub const CURLOPT_VERBOSE: CURLoption = CURLOPTTYPE_LONG + 41; +pub const CURLOPT_HEADER: CURLoption = CURLOPTTYPE_LONG + 42; +pub const CURLOPT_NOPROGRESS: CURLoption = CURLOPTTYPE_LONG + 43; +pub const CURLOPT_NOBODY: CURLoption = CURLOPTTYPE_LONG + 44; +pub const CURLOPT_FAILONERROR: CURLoption = CURLOPTTYPE_LONG + 45; +pub const CURLOPT_UPLOAD: CURLoption = CURLOPTTYPE_LONG + 46; +pub const CURLOPT_POST: CURLoption = CURLOPTTYPE_LONG + 47; +pub const CURLOPT_DIRLISTONLY: CURLoption = CURLOPTTYPE_LONG + 48; +pub const CURLOPT_APPEND: CURLoption = CURLOPTTYPE_LONG + 50; +pub const CURLOPT_NETRC: CURLoption = CURLOPTTYPE_LONG + 51; +pub const CURLOPT_FOLLOWLOCATION: CURLoption = CURLOPTTYPE_LONG + 52; +pub const CURLOPT_TRANSFERTEXT: CURLoption = CURLOPTTYPE_LONG + 53; +pub const CURLOPT_PUT: CURLoption = CURLOPTTYPE_LONG + 54; +pub const CURLOPT_PROGRESSFUNCTION: CURLoption = CURLOPTTYPE_FUNCTIONPOINT + 56; +pub const CURLOPT_PROGRESSDATA: CURLoption = CURLOPTTYPE_OBJECTPOINT + 57; +pub const CURLOPT_AUTOREFERER: CURLoption = CURLOPTTYPE_LONG + 58; +pub const CURLOPT_PROXYPORT: CURLoption = CURLOPTTYPE_LONG + 59; +pub const CURLOPT_POSTFIELDSIZE: CURLoption = CURLOPTTYPE_LONG + 60; +pub const CURLOPT_HTTPPROXYTUNNEL: CURLoption = CURLOPTTYPE_LONG + 61; +pub const CURLOPT_INTERFACE: CURLoption = CURLOPTTYPE_OBJECTPOINT + 62; +pub const CURLOPT_KRBLEVEL: CURLoption = CURLOPTTYPE_OBJECTPOINT + 63; +pub const CURLOPT_SSL_VERIFYPEER: CURLoption = CURLOPTTYPE_LONG + 64; +pub const CURLOPT_CAINFO: CURLoption = CURLOPTTYPE_OBJECTPOINT + 65; +pub const CURLOPT_MAXREDIRS: CURLoption = CURLOPTTYPE_LONG + 68; +pub const CURLOPT_FILETIME: CURLoption = CURLOPTTYPE_LONG + 69; +pub const CURLOPT_TELNETOPTIONS: CURLoption = CURLOPTTYPE_OBJECTPOINT + 70; +pub const CURLOPT_MAXCONNECTS: CURLoption = CURLOPTTYPE_LONG + 71; +pub const CURLOPT_CLOSEPOLICY: CURLoption = CURLOPTTYPE_LONG + 72; +pub const CURLOPT_FRESH_CONNECT: CURLoption = 
CURLOPTTYPE_LONG + 74; +pub const CURLOPT_FORBID_REUSE: CURLoption = CURLOPTTYPE_LONG + 75; +pub const CURLOPT_RANDOM_FILE: CURLoption = CURLOPTTYPE_OBJECTPOINT + 76; +pub const CURLOPT_EGDSOCKET: CURLoption = CURLOPTTYPE_OBJECTPOINT + 77; +pub const CURLOPT_CONNECTTIMEOUT: CURLoption = CURLOPTTYPE_LONG + 78; +pub const CURLOPT_HEADERFUNCTION: CURLoption = CURLOPTTYPE_FUNCTIONPOINT + 79; +pub const CURLOPT_HTTPGET: CURLoption = CURLOPTTYPE_LONG + 80; +pub const CURLOPT_SSL_VERIFYHOST: CURLoption = CURLOPTTYPE_LONG + 81; +pub const CURLOPT_COOKIEJAR: CURLoption = CURLOPTTYPE_OBJECTPOINT + 82; +pub const CURLOPT_SSL_CIPHER_LIST: CURLoption = CURLOPTTYPE_OBJECTPOINT + 83; +pub const CURLOPT_HTTP_VERSION: CURLoption = CURLOPTTYPE_LONG + 84; +pub const CURLOPT_FTP_USE_EPSV: CURLoption = CURLOPTTYPE_LONG + 85; +pub const CURLOPT_SSLCERTTYPE: CURLoption = CURLOPTTYPE_OBJECTPOINT + 86; +pub const CURLOPT_SSLKEY: CURLoption = CURLOPTTYPE_OBJECTPOINT + 87; +pub const CURLOPT_SSLKEYTYPE: CURLoption = CURLOPTTYPE_OBJECTPOINT + 88; +pub const CURLOPT_SSLENGINE: CURLoption = CURLOPTTYPE_OBJECTPOINT + 89; +pub const CURLOPT_SSLENGINE_DEFAULT: CURLoption = CURLOPTTYPE_LONG + 90; +pub const CURLOPT_DNS_USE_GLOBAL_CACHE: CURLoption = CURLOPTTYPE_LONG + 91; +pub const CURLOPT_DNS_CACHE_TIMEOUT: CURLoption = CURLOPTTYPE_LONG + 92; +pub const CURLOPT_PREQUOTE: CURLoption = CURLOPTTYPE_OBJECTPOINT + 93; +pub const CURLOPT_DEBUGFUNCTION: CURLoption = CURLOPTTYPE_FUNCTIONPOINT + 94; +pub const CURLOPT_DEBUGDATA: CURLoption = CURLOPTTYPE_OBJECTPOINT + 95; +pub const CURLOPT_COOKIESESSION: CURLoption = CURLOPTTYPE_LONG + 96; +pub const CURLOPT_CAPATH: CURLoption = CURLOPTTYPE_OBJECTPOINT + 97; +pub const CURLOPT_BUFFERSIZE: CURLoption = CURLOPTTYPE_LONG + 98; +pub const CURLOPT_NOSIGNAL: CURLoption = CURLOPTTYPE_LONG + 99; +pub const CURLOPT_SHARE: CURLoption = CURLOPTTYPE_OBJECTPOINT + 100; +pub const CURLOPT_PROXYTYPE: CURLoption = CURLOPTTYPE_LONG + 101; +pub const 
CURLOPT_ACCEPT_ENCODING: CURLoption = CURLOPTTYPE_OBJECTPOINT + 102; +pub const CURLOPT_PRIVATE: CURLoption = CURLOPTTYPE_OBJECTPOINT + 103; +pub const CURLOPT_HTTP200ALIASES: CURLoption = CURLOPTTYPE_OBJECTPOINT + 104; +pub const CURLOPT_UNRESTRICTED_AUTH: CURLoption = CURLOPTTYPE_LONG + 105; +pub const CURLOPT_FTP_USE_EPRT: CURLoption = CURLOPTTYPE_LONG + 106; +pub const CURLOPT_HTTPAUTH: CURLoption = CURLOPTTYPE_LONG + 107; +pub const CURLOPT_SSL_CTX_FUNCTION: CURLoption = CURLOPTTYPE_FUNCTIONPOINT + 108; +pub const CURLOPT_SSL_CTX_DATA: CURLoption = CURLOPTTYPE_OBJECTPOINT + 109; +pub const CURLOPT_FTP_CREATE_MISSING_DIRS: CURLoption = CURLOPTTYPE_LONG + 110; +pub const CURLOPT_PROXYAUTH: CURLoption = CURLOPTTYPE_LONG + 111; +pub const CURLOPT_FTP_RESPONSE_TIMEOUT: CURLoption = CURLOPTTYPE_LONG + 112; +pub const CURLOPT_IPRESOLVE: CURLoption = CURLOPTTYPE_LONG + 113; +pub const CURLOPT_MAXFILESIZE: CURLoption = CURLOPTTYPE_LONG + 114; +pub const CURLOPT_INFILESIZE_LARGE: CURLoption = CURLOPTTYPE_OFF_T + 115; +pub const CURLOPT_RESUME_FROM_LARGE: CURLoption = CURLOPTTYPE_OFF_T + 116; +pub const CURLOPT_MAXFILESIZE_LARGE: CURLoption = CURLOPTTYPE_OFF_T + 117; +pub const CURLOPT_NETRC_FILE: CURLoption = CURLOPTTYPE_OBJECTPOINT + 118; +pub const CURLOPT_USE_SSL: CURLoption = CURLOPTTYPE_LONG + 119; +pub const CURLOPT_POSTFIELDSIZE_LARGE: CURLoption = CURLOPTTYPE_OFF_T + 120; +pub const CURLOPT_TCP_NODELAY: CURLoption = CURLOPTTYPE_LONG + 121; +pub const CURLOPT_FTPSSLAUTH: CURLoption = CURLOPTTYPE_LONG + 129; +pub const CURLOPT_IOCTLFUNCTION: CURLoption = CURLOPTTYPE_FUNCTIONPOINT + 130; +pub const CURLOPT_IOCTLDATA: CURLoption = CURLOPTTYPE_OBJECTPOINT + 131; +pub const CURLOPT_FTP_ACCOUNT: CURLoption = CURLOPTTYPE_OBJECTPOINT + 134; +pub const CURLOPT_COOKIELIST: CURLoption = CURLOPTTYPE_OBJECTPOINT + 135; +pub const CURLOPT_IGNORE_CONTENT_LENGTH: CURLoption = CURLOPTTYPE_LONG + 136; +pub const CURLOPT_FTP_SKIP_PASV_IP: CURLoption = CURLOPTTYPE_LONG + 137; +pub 
const CURLOPT_FTP_FILEMETHOD: CURLoption = CURLOPTTYPE_LONG + 138; +pub const CURLOPT_LOCALPORT: CURLoption = CURLOPTTYPE_LONG + 139; +pub const CURLOPT_LOCALPORTRANGE: CURLoption = CURLOPTTYPE_LONG + 140; +pub const CURLOPT_CONNECT_ONLY: CURLoption = CURLOPTTYPE_LONG + 141; +pub const CURLOPT_CONV_FROM_NETWORK_FUNCTION: CURLoption = CURLOPTTYPE_FUNCTIONPOINT + 142; +pub const CURLOPT_CONV_TO_NETWORK_FUNCTION: CURLoption = CURLOPTTYPE_FUNCTIONPOINT + 143; +pub const CURLOPT_CONV_FROM_UTF8_FUNCTION: CURLoption = CURLOPTTYPE_FUNCTIONPOINT + 144; +pub const CURLOPT_MAX_SEND_SPEED_LARGE: CURLoption = CURLOPTTYPE_OFF_T + 145; +pub const CURLOPT_MAX_RECV_SPEED_LARGE: CURLoption = CURLOPTTYPE_OFF_T + 146; +pub const CURLOPT_FTP_ALTERNATIVE_TO_USER: CURLoption = CURLOPTTYPE_OBJECTPOINT + 147; +pub const CURLOPT_SOCKOPTFUNCTION: CURLoption = CURLOPTTYPE_FUNCTIONPOINT + 148; +pub const CURLOPT_SOCKOPTDATA: CURLoption = CURLOPTTYPE_OBJECTPOINT + 149; +pub const CURLOPT_SSL_SESSIONID_CACHE: CURLoption = CURLOPTTYPE_LONG + 150; +pub const CURLOPT_SSH_AUTH_TYPES: CURLoption = CURLOPTTYPE_LONG + 151; +pub const CURLOPT_SSH_PUBLIC_KEYFILE: CURLoption = CURLOPTTYPE_OBJECTPOINT + 152; +pub const CURLOPT_SSH_PRIVATE_KEYFILE: CURLoption = CURLOPTTYPE_OBJECTPOINT + 153; +pub const CURLOPT_FTP_SSL_CCC: CURLoption = CURLOPTTYPE_LONG + 154; +pub const CURLOPT_TIMEOUT_MS: CURLoption = CURLOPTTYPE_LONG + 155; +pub const CURLOPT_CONNECTTIMEOUT_MS: CURLoption = CURLOPTTYPE_LONG + 156; +pub const CURLOPT_HTTP_TRANSFER_DECODING: CURLoption = CURLOPTTYPE_LONG + 157; +pub const CURLOPT_HTTP_CONTENT_DECODING: CURLoption = CURLOPTTYPE_LONG + 158; +pub const CURLOPT_NEW_FILE_PERMS: CURLoption = CURLOPTTYPE_LONG + 159; +pub const CURLOPT_NEW_DIRECTORY_PERMS: CURLoption = CURLOPTTYPE_LONG + 160; +pub const CURLOPT_POSTREDIR: CURLoption = CURLOPTTYPE_LONG + 161; +pub const CURLOPT_SSH_HOST_PUBLIC_KEY_MD5: CURLoption = CURLOPTTYPE_OBJECTPOINT + 162; +pub const CURLOPT_OPENSOCKETFUNCTION: CURLoption = 
CURLOPTTYPE_FUNCTIONPOINT + 163; +pub const CURLOPT_OPENSOCKETDATA: CURLoption = CURLOPTTYPE_OBJECTPOINT + 164; +pub const CURLOPT_COPYPOSTFIELDS: CURLoption = CURLOPTTYPE_OBJECTPOINT + 165; +pub const CURLOPT_PROXY_TRANSFER_MODE: CURLoption = CURLOPTTYPE_LONG + 166; +pub const CURLOPT_SEEKFUNCTION: CURLoption = CURLOPTTYPE_FUNCTIONPOINT + 167; +pub const CURLOPT_SEEKDATA: CURLoption = CURLOPTTYPE_OBJECTPOINT + 168; +pub const CURLOPT_CRLFILE: CURLoption = CURLOPTTYPE_OBJECTPOINT + 169; +pub const CURLOPT_ISSUERCERT: CURLoption = CURLOPTTYPE_OBJECTPOINT + 170; +pub const CURLOPT_ADDRESS_SCOPE: CURLoption = CURLOPTTYPE_LONG + 171; +pub const CURLOPT_CERTINFO: CURLoption = CURLOPTTYPE_LONG + 172; +pub const CURLOPT_USERNAME: CURLoption = CURLOPTTYPE_OBJECTPOINT + 173; +pub const CURLOPT_PASSWORD: CURLoption = CURLOPTTYPE_OBJECTPOINT + 174; +pub const CURLOPT_PROXYUSERNAME: CURLoption = CURLOPTTYPE_OBJECTPOINT + 175; +pub const CURLOPT_PROXYPASSWORD: CURLoption = CURLOPTTYPE_OBJECTPOINT + 176; +pub const CURLOPT_NOPROXY: CURLoption = CURLOPTTYPE_OBJECTPOINT + 177; +pub const CURLOPT_TFTP_BLKSIZE: CURLoption = CURLOPTTYPE_LONG + 178; +pub const CURLOPT_SOCKS5_GSSAPI_SERVICE: CURLoption = CURLOPTTYPE_OBJECTPOINT + 179; +pub const CURLOPT_SOCKS5_GSSAPI_NEC: CURLoption = CURLOPTTYPE_LONG + 180; +pub const CURLOPT_PROTOCOLS: CURLoption = CURLOPTTYPE_LONG + 181; +pub const CURLOPT_REDIR_PROTOCOLS: CURLoption = CURLOPTTYPE_LONG + 182; +pub const CURLOPT_SSH_KNOWNHOSTS: CURLoption = CURLOPTTYPE_OBJECTPOINT + 183; +pub const CURLOPT_SSH_KEYFUNCTION: CURLoption = CURLOPTTYPE_FUNCTIONPOINT + 184; +pub const CURLOPT_SSH_KEYDATA: CURLoption = CURLOPTTYPE_OBJECTPOINT + 185; +pub const CURLOPT_MAIL_FROM: CURLoption = CURLOPTTYPE_OBJECTPOINT + 186; +pub const CURLOPT_MAIL_RCPT: CURLoption = CURLOPTTYPE_OBJECTPOINT + 187; +pub const CURLOPT_FTP_USE_PRET: CURLoption = CURLOPTTYPE_LONG + 188; +pub const CURLOPT_RTSP_REQUEST: CURLoption = CURLOPTTYPE_LONG + 189; +pub const 
CURLOPT_RTSP_SESSION_ID: CURLoption = CURLOPTTYPE_OBJECTPOINT + 190; +pub const CURLOPT_RTSP_STREAM_URI: CURLoption = CURLOPTTYPE_OBJECTPOINT + 191; +pub const CURLOPT_RTSP_TRANSPORT: CURLoption = CURLOPTTYPE_OBJECTPOINT + 192; +pub const CURLOPT_RTSP_CLIENT_CSEQ: CURLoption = CURLOPTTYPE_LONG + 193; +pub const CURLOPT_RTSP_SERVER_CSEQ: CURLoption = CURLOPTTYPE_LONG + 194; +pub const CURLOPT_INTERLEAVEDATA: CURLoption = CURLOPTTYPE_OBJECTPOINT + 195; +pub const CURLOPT_INTERLEAVEFUNCTION: CURLoption = CURLOPTTYPE_FUNCTIONPOINT + 196; +pub const CURLOPT_WILDCARDMATCH: CURLoption = CURLOPTTYPE_LONG + 197; +pub const CURLOPT_CHUNK_BGN_FUNCTION: CURLoption = CURLOPTTYPE_FUNCTIONPOINT + 198; +pub const CURLOPT_CHUNK_END_FUNCTION: CURLoption = CURLOPTTYPE_FUNCTIONPOINT + 199; +pub const CURLOPT_FNMATCH_FUNCTION: CURLoption = CURLOPTTYPE_FUNCTIONPOINT + 200; +pub const CURLOPT_CHUNK_DATA: CURLoption = CURLOPTTYPE_OBJECTPOINT + 201; +pub const CURLOPT_FNMATCH_DATA: CURLoption = CURLOPTTYPE_OBJECTPOINT + 202; +pub const CURLOPT_RESOLVE: CURLoption = CURLOPTTYPE_OBJECTPOINT + 203; +pub const CURLOPT_TLSAUTH_USERNAME: CURLoption = CURLOPTTYPE_OBJECTPOINT + 204; +pub const CURLOPT_TLSAUTH_PASSWORD: CURLoption = CURLOPTTYPE_OBJECTPOINT + 205; +pub const CURLOPT_TLSAUTH_TYPE: CURLoption = CURLOPTTYPE_OBJECTPOINT + 206; +pub const CURLOPT_TRANSFER_ENCODING: CURLoption = CURLOPTTYPE_LONG + 207; +pub const CURLOPT_CLOSESOCKETFUNCTION: CURLoption = CURLOPTTYPE_FUNCTIONPOINT + 208; +pub const CURLOPT_CLOSESOCKETDATA: CURLoption = CURLOPTTYPE_OBJECTPOINT + 209; +pub const CURLOPT_GSSAPI_DELEGATION: CURLoption = CURLOPTTYPE_LONG + 210; +// pub const CURLOPT_DNS_SERVERS: CURLoption = CURLOPTTYPE_OBJECTPOINT + 211; +// pub const CURLOPT_ACCEPTTIMEOUT_MS: CURLoption = CURLOPTTYPE_LONG + 212; +// pub const CURLOPT_TCP_KEEPALIVE: CURLoption = CURLOPTTYPE_LONG + 213; +// pub const CURLOPT_TCP_KEEPIDLE: CURLoption = CURLOPTTYPE_LONG + 214; +// pub const CURLOPT_TCP_KEEPINTVL: CURLoption = 
CURLOPTTYPE_LONG + 215; +pub const CURLOPT_SSL_OPTIONS: CURLoption = CURLOPTTYPE_LONG + 216; +// pub const CURLOPT_MAIL_AUTH: CURLoption = CURLOPTTYPE_OBJECTPOINT + 217; +// pub const CURLOPT_SASL_IR: CURLoption = CURLOPTTYPE_LONG + 218; +// pub const CURLOPT_XFERINFOFUNCTION: CURLoption = CURLOPTTYPE_FUNCTIONPOINT + 219; +// pub const CURLOPT_XOAUTH2_BEARER: CURLoption = CURLOPTTYPE_OBJECTPOINT + 220; +// pub const CURLOPT_DNS_INTERFACE: CURLoption = CURLOPTTYPE_OBJECTPOINT + 221; +// pub const CURLOPT_DNS_LOCAL_IP4: CURLoption = CURLOPTTYPE_OBJECTPOINT + 222; +// pub const CURLOPT_DNS_LOCAL_IP6: CURLoption = CURLOPTTYPE_OBJECTPOINT + 223; +// pub const CURLOPT_LOGIN_OPTIONS: CURLoption = CURLOPTTYPE_OBJECTPOINT + 224; + +pub const CURL_IPRESOLVE_WHATEVER: c_int = 0; +pub const CURL_IPRESOLVE_V4: c_int = 1; +pub const CURL_IPRESOLVE_V6: c_int = 2; + +pub const CURLSSLOPT_ALLOW_BEAST: c_long = 1 << 0; +pub const CURLSSLOPT_NO_REVOKE: c_long = 1 << 1; + +/// These enums are for use with the CURLOPT_HTTP_VERSION option. +/// +/// Setting this means we don't care, and that we'd like the library to choose +/// the best possible for us! +pub const CURL_HTTP_VERSION_NONE: c_int = 0; +/// Please use HTTP 1.0 in the request +pub const CURL_HTTP_VERSION_1_0: c_int = 1; +/// Please use HTTP 1.1 in the request +pub const CURL_HTTP_VERSION_1_1: c_int = 2; +/// Please use HTTP 2 in the request +/// (Added in CURL 7.33.0) +pub const CURL_HTTP_VERSION_2_0: c_int = 3; +/// Use version 2 for HTTPS, version 1.1 for HTTP +/// (Added in CURL 7.47.0) +pub const CURL_HTTP_VERSION_2TLS: c_int = 4; +/// Please use HTTP 2 without HTTP/1.1 Upgrade +/// (Added in CURL 7.49.0) +pub const CURL_HTTP_VERSION_2_PRIOR_KNOWLEDGE: c_int = 5; + +// Note that the type here is wrong, it's just intended to just be an enum. 
+pub const CURL_SSLVERSION_DEFAULT: CURLoption = 0; +pub const CURL_SSLVERSION_TLSv1: CURLoption = 1; +pub const CURL_SSLVERSION_SSLv2: CURLoption = 2; +pub const CURL_SSLVERSION_SSLv3: CURLoption = 3; +// pub const CURL_SSLVERSION_TLSv1_0: CURLoption = 4; +// pub const CURL_SSLVERSION_TLSv1_1: CURLoption = 5; +// pub const CURL_SSLVERSION_TLSv1_2: CURLoption = 6; + +pub const CURLOPT_READDATA: CURLoption = CURLOPT_INFILE; +pub const CURLOPT_WRITEDATA: CURLoption = CURLOPT_FILE; +pub const CURLOPT_HEADERDATA: CURLoption = CURLOPT_WRITEHEADER; + +pub type curl_TimeCond = __enum_ty; +pub const CURL_TIMECOND_NONE: curl_TimeCond = 0; +pub const CURL_TIMECOND_IFMODSINCE: curl_TimeCond = 1; +pub const CURL_TIMECOND_IFUNMODSINCE: curl_TimeCond = 2; +pub const CURL_TIMECOND_LASTMOD: curl_TimeCond = 3; + +pub type CURLformoption = __enum_ty; +pub const CURLFORM_NOTHING: CURLformoption = 0; +pub const CURLFORM_COPYNAME: CURLformoption = 1; +pub const CURLFORM_PTRNAME: CURLformoption = 2; +pub const CURLFORM_NAMELENGTH: CURLformoption = 3; +pub const CURLFORM_COPYCONTENTS: CURLformoption = 4; +pub const CURLFORM_PTRCONTENTS: CURLformoption = 5; +pub const CURLFORM_CONTENTSLENGTH: CURLformoption = 6; +pub const CURLFORM_FILECONTENT: CURLformoption = 7; +pub const CURLFORM_ARRAY: CURLformoption = 8; +pub const CURLFORM_OBSOLETE: CURLformoption = 9; +pub const CURLFORM_FILE: CURLformoption = 10; +pub const CURLFORM_BUFFER: CURLformoption = 11; +pub const CURLFORM_BUFFERPTR: CURLformoption = 12; +pub const CURLFORM_BUFFERLENGTH: CURLformoption = 13; +pub const CURLFORM_CONTENTTYPE: CURLformoption = 14; +pub const CURLFORM_CONTENTHEADER: CURLformoption = 15; +pub const CURLFORM_FILENAME: CURLformoption = 16; +pub const CURLFORM_END: CURLformoption = 17; +pub const CURLFORM_STREAM: CURLformoption = 19; + +pub type CURLFORMcode = __enum_ty; +pub const CURL_FORMADD_OK: CURLFORMcode = 0; +pub const CURL_FORMADD_MEMORY: CURLFORMcode = 1; +pub const CURL_FORMADD_OPTION_TWICE: 
CURLFORMcode = 2; +pub const CURL_FORMADD_NULL: CURLFORMcode = 3; +pub const CURL_FORMADD_UNKNOWN_OPTION: CURLFORMcode = 4; +pub const CURL_FORMADD_INCOMPLETE: CURLFORMcode = 5; +pub const CURL_FORMADD_ILLEGAL_ARRAY: CURLFORMcode = 6; +pub const CURL_FORMADD_DISABLED: CURLFORMcode = 7; + +#[repr(C)] +pub struct curl_forms { + pub option: CURLformoption, + pub value: *const c_char, +} + +pub type curl_formget_callback = extern fn(*mut c_void, + *const c_char, + size_t) -> size_t; + +#[repr(C)] +pub struct curl_slist { + pub data: *mut c_char, + pub next: *mut curl_slist, +} + +#[repr(C)] +pub struct curl_certinfo { + pub num_of_certs: c_int, + pub certinfo: *mut *mut curl_slist, +} + +// pub type curl_sslbackend = __enum_ty; +// pub const CURLSSLBACKEND_NONE: curl_sslbackend = 0; +// pub const CURLSSLBACKEND_OPENSSL: curl_sslbackend = 1; +// pub const CURLSSLBACKEND_GNUTLS: curl_sslbackend = 2; +// pub const CURLSSLBACKEND_NSS: curl_sslbackend = 3; +// pub const CURLSSLBACKEND_QSOSSL: curl_sslbackend = 4; +// pub const CURLSSLBACKEND_GSKIT: curl_sslbackend = 5; +// pub const CURLSSLBACKEND_POLARSSL: curl_sslbackend = 6; +// pub const CURLSSLBACKEND_CYASSL: curl_sslbackend = 7; +// pub const CURLSSLBACKEND_SCHANNEL: curl_sslbackend = 8; +// pub const CURLSSLBACKEND_DARWINSSL: curl_sslbackend = 9; + +// #[repr(C)] +// pub struct curl_tlssessioninfo { +// pub backend: curl_sslbackend, +// pub internals: *mut c_void, +// } + +pub const CURLINFO_STRING: CURLINFO = 0x100000; +pub const CURLINFO_LONG: CURLINFO = 0x200000; +pub const CURLINFO_DOUBLE: CURLINFO = 0x300000; +pub const CURLINFO_SLIST: CURLINFO = 0x400000; +pub const CURLINFO_MASK: CURLINFO = 0x0fffff; +pub const CURLINFO_TYPEMASK: CURLINFO = 0xf00000; + +pub const CURLINFO_EFFECTIVE_URL: CURLINFO = CURLINFO_STRING + 1; +pub const CURLINFO_RESPONSE_CODE: CURLINFO = CURLINFO_LONG + 2; +pub const CURLINFO_TOTAL_TIME: CURLINFO = CURLINFO_DOUBLE + 3; +pub const CURLINFO_NAMELOOKUP_TIME: CURLINFO = CURLINFO_DOUBLE + 
4; +pub const CURLINFO_CONNECT_TIME: CURLINFO = CURLINFO_DOUBLE + 5; +pub const CURLINFO_PRETRANSFER_TIME: CURLINFO = CURLINFO_DOUBLE + 6; +pub const CURLINFO_SIZE_UPLOAD: CURLINFO = CURLINFO_DOUBLE + 7; +pub const CURLINFO_SIZE_DOWNLOAD: CURLINFO = CURLINFO_DOUBLE + 8; +pub const CURLINFO_SPEED_DOWNLOAD: CURLINFO = CURLINFO_DOUBLE + 9; +pub const CURLINFO_SPEED_UPLOAD: CURLINFO = CURLINFO_DOUBLE + 10; +pub const CURLINFO_HEADER_SIZE: CURLINFO = CURLINFO_LONG + 11; +pub const CURLINFO_REQUEST_SIZE: CURLINFO = CURLINFO_LONG + 12; +pub const CURLINFO_SSL_VERIFYRESULT: CURLINFO = CURLINFO_LONG + 13; +pub const CURLINFO_FILETIME: CURLINFO = CURLINFO_LONG + 14; +pub const CURLINFO_CONTENT_LENGTH_DOWNLOAD: CURLINFO = CURLINFO_DOUBLE + 15; +pub const CURLINFO_CONTENT_LENGTH_UPLOAD: CURLINFO = CURLINFO_DOUBLE + 16; +pub const CURLINFO_STARTTRANSFER_TIME: CURLINFO = CURLINFO_DOUBLE + 17; +pub const CURLINFO_CONTENT_TYPE: CURLINFO = CURLINFO_STRING + 18; +pub const CURLINFO_REDIRECT_TIME: CURLINFO = CURLINFO_DOUBLE + 19; +pub const CURLINFO_REDIRECT_COUNT: CURLINFO = CURLINFO_LONG + 20; +pub const CURLINFO_PRIVATE: CURLINFO = CURLINFO_STRING + 21; +pub const CURLINFO_HTTP_CONNECTCODE: CURLINFO = CURLINFO_LONG + 22; +pub const CURLINFO_HTTPAUTH_AVAIL: CURLINFO = CURLINFO_LONG + 23; +pub const CURLINFO_PROXYAUTH_AVAIL: CURLINFO = CURLINFO_LONG + 24; +pub const CURLINFO_OS_ERRNO: CURLINFO = CURLINFO_LONG + 25; +pub const CURLINFO_NUM_CONNECTS: CURLINFO = CURLINFO_LONG + 26; +pub const CURLINFO_SSL_ENGINES: CURLINFO = CURLINFO_SLIST + 27; +pub const CURLINFO_COOKIELIST: CURLINFO = CURLINFO_SLIST + 28; +pub const CURLINFO_LASTSOCKET: CURLINFO = CURLINFO_LONG + 29; +pub const CURLINFO_FTP_ENTRY_PATH: CURLINFO = CURLINFO_STRING + 30; +pub const CURLINFO_REDIRECT_URL: CURLINFO = CURLINFO_STRING + 31; +pub const CURLINFO_PRIMARY_IP: CURLINFO = CURLINFO_STRING + 32; +pub const CURLINFO_APPCONNECT_TIME: CURLINFO = CURLINFO_DOUBLE + 33; +pub const CURLINFO_CERTINFO: CURLINFO = 
CURLINFO_SLIST + 34; +pub const CURLINFO_CONDITION_UNMET: CURLINFO = CURLINFO_LONG + 35; +pub const CURLINFO_RTSP_SESSION_ID: CURLINFO = CURLINFO_STRING + 36; +pub const CURLINFO_RTSP_CLIENT_CSEQ: CURLINFO = CURLINFO_LONG + 37; +pub const CURLINFO_RTSP_SERVER_CSEQ: CURLINFO = CURLINFO_LONG + 38; +pub const CURLINFO_RTSP_CSEQ_RECV: CURLINFO = CURLINFO_LONG + 39; +pub const CURLINFO_PRIMARY_PORT: CURLINFO = CURLINFO_LONG + 40; +pub const CURLINFO_LOCAL_IP: CURLINFO = CURLINFO_STRING + 41; +pub const CURLINFO_LOCAL_PORT: CURLINFO = CURLINFO_LONG + 42; +// pub const CURLINFO_TLS_SESSION: CURLINFO = CURLINFO_SLIST + 43; + +pub type curl_closepolicy = __enum_ty; +pub const CURLCLOSEPOLICY_NONE: curl_closepolicy = 0; +pub const CURLCLOSEPOLICY_OLDEST: curl_closepolicy = 1; +pub const CURLCLOSEPOLICY_LEAST_RECENTLY_USED: curl_closepolicy = 2; +pub const CURLCLOSEPOLICY_LEAST_TRAFFIC: curl_closepolicy = 3; +pub const CURLCLOSEPOLICY_SLOWEST: curl_closepolicy = 4; +pub const CURLCLOSEPOLICY_CALLBACK: curl_closepolicy = 5; + +pub const CURL_GLOBAL_SSL: c_long = 1 << 0; +pub const CURL_GLOBAL_WIN32: c_long = 1 << 1; +pub const CURL_GLOBAL_ALL: c_long = CURL_GLOBAL_SSL | CURL_GLOBAL_WIN32; +pub const CURL_GLOBAL_NOTHING: c_long = 0; +pub const CURL_GLOBAL_DEFAULT: c_long = CURL_GLOBAL_ALL; +// pub const CURL_GLOBAL_ACK_EINTR: c_long = 1 << 2; + +pub type curl_lock_data = __enum_ty; +pub const CURL_LOCK_DATA_NONE: curl_lock_data = 0; +pub const CURL_LOCK_DATA_SHARE: curl_lock_data = 1; +pub const CURL_LOCK_DATA_COOKIE: curl_lock_data = 2; +pub const CURL_LOCK_DATA_DNS: curl_lock_data = 3; +pub const CURL_LOCK_DATA_SSL_SESSION: curl_lock_data = 4; +pub const CURL_LOCK_DATA_CONNECT: curl_lock_data = 5; + +pub type curl_lock_access = __enum_ty; +pub const CURL_LOCK_ACCESS_NONE: curl_lock_access = 0; +pub const CURL_LOCK_ACCESS_SHARED: curl_lock_access = 1; +pub const CURL_LOCK_ACCESS_SINGLE: curl_lock_access = 2; + +pub type curl_lock_function = extern fn(*mut CURL, + 
curl_lock_data, + curl_lock_access, + *mut c_void); +pub type curl_unlock_function = extern fn(*mut CURL, + curl_lock_data, + *mut c_void); + +pub enum CURLSH {} + +pub type CURLSHcode = __enum_ty; +pub const CURLSHE_OK: CURLSHcode = 0; +pub const CURLSHE_BAD_OPTION: CURLSHcode = 1; +pub const CURLSHE_IN_USE: CURLSHcode = 2; +pub const CURLSHE_INVALID: CURLSHcode = 3; +pub const CURLSHE_NOMEM: CURLSHcode = 4; +// pub const CURLSHE_NOT_BUILT_IN: CURLSHcode = 5; + +pub type CURLSHoption = __enum_ty; +pub const CURLSHOPT_NONE: CURLSHoption = 0; +pub const CURLSHOPT_SHARE: CURLSHoption = 1; +pub const CURLSHOPT_UNSHARE: CURLSHoption = 2; +pub const CURLSHOPT_LOCKFUNC: CURLSHoption = 3; +pub const CURLSHOPT_UNLOCKFUNC: CURLSHoption = 4; +pub const CURLSHOPT_USERDATA: CURLSHoption = 5; + +pub const CURLVERSION_FIRST: CURLversion = 0; +pub const CURLVERSION_SECOND: CURLversion = 1; +pub const CURLVERSION_THIRD: CURLversion = 2; +pub const CURLVERSION_FOURTH: CURLversion = 3; + +#[repr(C)] +pub struct curl_version_info_data { + pub age: CURLversion, + pub version: *const c_char, + pub version_num: c_uint, + pub host: *const c_char, + pub features: c_int, + pub ssl_version: *const c_char, + pub ssl_version_num: c_long, + pub libz_version: *const c_char, + pub protocols: *const *const c_char, + pub ares: *const c_char, + pub ares_num: c_int, + pub libidn: *const c_char, + pub iconv_ver_num: c_int, + pub libssh_version: *const c_char, +} + +pub const CURL_VERSION_IPV6: c_int = 1 << 0; +pub const CURL_VERSION_KERBEROS4: c_int = 1 << 1; +pub const CURL_VERSION_SSL: c_int = 1 << 2; +pub const CURL_VERSION_LIBZ: c_int = 1 << 3; +pub const CURL_VERSION_NTLM: c_int = 1 << 4; +pub const CURL_VERSION_GSSNEGOTIATE: c_int = 1 << 5; +pub const CURL_VERSION_DEBUG: c_int = 1 << 6; +pub const CURL_VERSION_ASYNCHDNS: c_int = 1 << 7; +pub const CURL_VERSION_SPNEGO: c_int = 1 << 8; +pub const CURL_VERSION_LARGEFILE: c_int = 1 << 9; +pub const CURL_VERSION_IDN: c_int = 1 << 10; +pub const 
CURL_VERSION_SSPI: c_int = 1 << 11; +pub const CURL_VERSION_CONV: c_int = 1 << 12; +pub const CURL_VERSION_CURLDEBUG: c_int = 1 << 13; +pub const CURL_VERSION_TLSAUTH_SRP: c_int = 1 << 14; +pub const CURL_VERSION_NTLM_WB: c_int = 1 << 15; +// pub const CURL_VERSION_HTTP2: c_int = 1 << 16; + +pub const CURLPAUSE_RECV: c_int = 1 << 0; +pub const CURLPAUSE_RECV_CONT: c_int = 0; +pub const CURLPAUSE_SEND: c_int = 1 << 2; +pub const CURLPAUSE_SEND_CONT: c_int = 0; + +pub enum CURLM {} + +pub type CURLMcode = c_int; +pub const CURLM_CALL_MULTI_PERFORM: CURLMcode = -1; +pub const CURLM_OK: CURLMcode = 0; +pub const CURLM_BAD_HANDLE: CURLMcode = 1; +pub const CURLM_BAD_EASY_HANDLE: CURLMcode = 2; +pub const CURLM_OUT_OF_MEMORY: CURLMcode = 3; +pub const CURLM_INTERNAL_ERROR: CURLMcode = 4; +pub const CURLM_BAD_SOCKET: CURLMcode = 5; +pub const CURLM_UNKNOWN_OPTION: CURLMcode = 6; +// pub const CURLM_ADDED_ALREADY: CURLMcode = 7; + +pub type CURLMSG = __enum_ty; +pub const CURLMSG_NONE: CURLMSG = 0; +pub const CURLMSG_DONE: CURLMSG = 1; + +#[repr(C)] +pub struct CURLMsg { + pub msg: CURLMSG, + pub easy_handle: *mut CURL, + pub data: *mut c_void, +} + +pub const CURL_WAIT_POLLIN: c_short = 0x1; +pub const CURL_WAIT_POLLPRI: c_short = 0x2; +pub const CURL_WAIT_POLLOUT: c_short = 0x4; + +#[repr(C)] +pub struct curl_waitfd { + pub fd: curl_socket_t, + pub events: c_short, + pub revents: c_short, +} + +pub const CURL_POLL_NONE: c_int = 0; +pub const CURL_POLL_IN: c_int = 1; +pub const CURL_POLL_OUT: c_int = 2; +pub const CURL_POLL_INOUT: c_int = 3; +pub const CURL_POLL_REMOVE: c_int = 4; +pub const CURL_CSELECT_IN: c_int = 1; +pub const CURL_CSELECT_OUT: c_int = 2; +pub const CURL_CSELECT_ERR: c_int = 4; +pub const CURL_SOCKET_TIMEOUT: curl_socket_t = CURL_SOCKET_BAD; + +pub type curl_socket_callback = extern fn(*mut CURL, + curl_socket_t, + c_int, + *mut c_void, + *mut c_void) -> c_int; +pub type curl_multi_timer_callback = extern fn(*mut CURLM, + c_long, + *mut c_void) -> 
c_int; + +pub type CURLMoption = __enum_ty; +pub const CURLMOPT_SOCKETFUNCTION: CURLMoption = CURLOPTTYPE_FUNCTIONPOINT + 1; +pub const CURLMOPT_SOCKETDATA: CURLMoption = CURLOPTTYPE_OBJECTPOINT + 2; +pub const CURLMOPT_PIPELINING: CURLMoption = CURLOPTTYPE_LONG + 3; +pub const CURLMOPT_TIMERFUNCTION: CURLMoption = CURLOPTTYPE_FUNCTIONPOINT + 4; +pub const CURLMOPT_TIMERDATA: CURLMoption = CURLOPTTYPE_OBJECTPOINT + 5; +// pub const CURLMOPT_MAXCONNECTS: CURLMoption = CURLOPTTYPE_LONG + 6; +// pub const CURLMOPT_MAX_HOST_CONNECTIONS: CURLMoption = CURLOPTTYPE_LONG + 7; +// pub const CURLMOPT_MAX_PIPELINE_LENGTH: CURLMoption = CURLOPTTYPE_LONG + 8; +// pub const CURLMOPT_CONTENT_LENGTH_PENALTY_SIZE: CURLMoption = CURLOPTTYPE_OFF_T + 9; +// pub const CURLMOPT_CHUNK_LENGTH_PENALTY_SIZE: CURLMoption = CURLOPTTYPE_OFF_T + 10; +// pub const CURLMOPT_PIPELINING_SITE_BL: CURLMoption = CURLOPTTYPE_OBJECTPOINT + 11; +// pub const CURLMOPT_PIPELINING_SERVER_BL: CURLMoption = CURLOPTTYPE_OBJECTPOINT + 12; +// pub const CURLMOPT_MAX_TOTAL_CONNECTIONS: CURLMoption = CURLOPTTYPE_LONG + 13; + +pub const CURL_ERROR_SIZE: usize = 256; + +extern { + pub fn curl_formadd(httppost: *mut *mut curl_httppost, + last_post: *mut *mut curl_httppost, + ...) 
-> CURLFORMcode; + pub fn curl_formget(form: *mut curl_httppost, + arg: *mut c_void, + append: curl_formget_callback) -> c_int; + pub fn curl_formfree(form: *mut curl_httppost); + + pub fn curl_version() -> *mut c_char; + + pub fn curl_easy_escape(handle: *mut CURL, + string: *const c_char, + length: c_int) -> *mut c_char; + pub fn curl_easy_unescape(handle: *mut CURL, + string: *const c_char, + length: c_int, + outlength: *mut c_int) -> *mut c_char; + pub fn curl_free(p: *mut c_void); + + pub fn curl_global_init(flags: c_long) -> CURLcode; + pub fn curl_global_init_mem(flags: c_long, + m: curl_malloc_callback, + f: curl_free_callback, + r: curl_realloc_callback, + s: curl_strdup_callback, + c: curl_calloc_callback) -> CURLcode; + pub fn curl_global_cleanup(); + + pub fn curl_slist_append(list: *mut curl_slist, + val: *const c_char) -> *mut curl_slist; + pub fn curl_slist_free_all(list: *mut curl_slist); + + pub fn curl_getdate(p: *const c_char, _: *const time_t) -> time_t; + + pub fn curl_share_init() -> *mut CURLSH; + pub fn curl_share_setopt(sh: *mut CURLSH, + opt: CURLSHoption, + ...) -> CURLSHcode; + pub fn curl_share_cleanup(sh: *mut CURLSH) -> CURLSHcode; + + pub fn curl_version_info(t: CURLversion) -> *mut curl_version_info_data; + + pub fn curl_easy_strerror(code: CURLcode) -> *const c_char; + pub fn curl_share_strerror(code: CURLSHcode) -> *const c_char; + pub fn curl_easy_pause(handle: *mut CURL, bitmask: c_int) -> CURLcode; + + pub fn curl_easy_init() -> *mut CURL; + pub fn curl_easy_setopt(curl: *mut CURL, option: CURLoption, ...) -> CURLcode; + pub fn curl_easy_perform(curl: *mut CURL) -> CURLcode; + pub fn curl_easy_cleanup(curl: *mut CURL); + pub fn curl_easy_getinfo(curl: *mut CURL, info: CURLINFO, ...) 
-> CURLcode; + pub fn curl_easy_duphandle(curl: *mut CURL) -> *mut CURL; + pub fn curl_easy_reset(curl: *mut CURL); + pub fn curl_easy_recv(curl: *mut CURL, + buffer: *mut c_void, + buflen: size_t, + n: *mut size_t) -> CURLcode; + pub fn curl_easy_send(curl: *mut CURL, + buffer: *const c_void, + buflen: size_t, + n: *mut size_t) -> CURLcode; + + pub fn curl_multi_init() -> *mut CURLM; + pub fn curl_multi_add_handle(multi_handle: *mut CURLM, + curl_handle: *mut CURL) -> CURLMcode; + pub fn curl_multi_remove_handle(multi_handle: *mut CURLM, + curl_handle: *mut CURL) -> CURLMcode; + pub fn curl_multi_fdset(multi_handle: *mut CURLM, + read_fd_set: *mut fd_set, + write_fd_set: *mut fd_set, + exc_fd_set: *mut fd_set, + max_fd: *mut c_int) -> CURLMcode; + pub fn curl_multi_wait(multi_handle: *mut CURLM, + extra_fds: *mut curl_waitfd, + extra_nfds: c_uint, + timeout_ms: c_int, + ret: *mut c_int) -> CURLMcode; + pub fn curl_multi_perform(multi_handle: *mut CURLM, + running_handles: *mut c_int) -> CURLMcode; + pub fn curl_multi_cleanup(multi_handle: *mut CURLM) -> CURLMcode; + pub fn curl_multi_info_read(multi_handle: *mut CURLM, + msgs_in_queue: *mut c_int) -> *mut CURLMsg; + pub fn curl_multi_strerror(code: CURLMcode) -> *const c_char; + pub fn curl_multi_socket(multi_handle: *mut CURLM, + s: curl_socket_t, + running_handles: *mut c_int) -> CURLMcode; + pub fn curl_multi_socket_action(multi_handle: *mut CURLM, + s: curl_socket_t, + ev_bitmask: c_int, + running_handles: *mut c_int) -> CURLMcode; + pub fn curl_multi_socket_all(multi_handle: *mut CURLM, + running_handles: *mut c_int) -> CURLMcode; + pub fn curl_multi_timeout(multi_handle: *mut CURLM, + milliseconds: *mut c_long) -> CURLMcode; + pub fn curl_multi_setopt(multi_handle: *mut CURLM, + option: CURLMoption, + ...) 
-> CURLMcode; + pub fn curl_multi_assign(multi_handle: *mut CURLM, + sockfd: curl_socket_t, + sockp: *mut c_void) -> CURLMcode; +} diff -Nru cargo-0.17.0/vendor/curl-sys-0.3.6/build.rs cargo-0.19.0/vendor/curl-sys-0.3.6/build.rs --- cargo-0.17.0/vendor/curl-sys-0.3.6/build.rs 2017-03-24 16:59:54.000000000 +0000 +++ cargo-0.19.0/vendor/curl-sys-0.3.6/build.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,302 +0,0 @@ -extern crate pkg_config; -extern crate gcc; - -use std::env; -use std::ffi::OsString; -use std::fs; -use std::path::{PathBuf, Path}; -use std::process::Command; -use std::io::ErrorKind; - -macro_rules! t { - ($e:expr) => (match $e { - Ok(t) => t, - Err(e) => panic!("{} return the error {}", stringify!($e), e), - }) -} - -fn main() { - let target = env::var("TARGET").unwrap(); - let host = env::var("HOST").unwrap(); - let src = env::current_dir().unwrap(); - let dst = PathBuf::from(env::var_os("OUT_DIR").unwrap()); - let windows = target.contains("windows"); - - // OSX ships libcurl by default, so we just use that version - // unconditionally. - if target.contains("apple") { - return println!("cargo:rustc-flags=-l curl"); - } - - // Illumos/Solaris requires explicit linking with libnsl - if target.contains("solaris") { - println!("cargo:rustc-flags=-l nsl"); - } - - // Next, fall back and try to use pkg-config if its available. 
- if !target.contains("windows") { - match pkg_config::find_library("libcurl") { - Ok(lib) => { - for path in lib.include_paths.iter() { - println!("cargo:include={}", path.display()); - } - return - } - Err(e) => println!("Couldn't find libcurl from \ - pkgconfig ({:?}), compiling it from source...", e), - } - } - - if !Path::new("curl/.git").exists() { - let _ = Command::new("git").args(&["submodule", "update", "--init"]) - .status(); - } - - println!("cargo:rustc-link-search={}/lib", dst.display()); - println!("cargo:rustc-link-lib=static=curl"); - println!("cargo:root={}", dst.display()); - println!("cargo:include={}/include", dst.display()); - if windows { - println!("cargo:rustc-link-lib=ws2_32"); - println!("cargo:rustc-link-lib=crypt32"); - } - - // MSVC builds are just totally different - if target.contains("msvc") { - return build_msvc(&target); - } - - let openssl_root = register_dep("OPENSSL"); - let zlib_root = register_dep("Z"); - - let cfg = gcc::Config::new(); - let compiler = cfg.get_compiler(); - - let _ = fs::create_dir(&dst.join("build")); - - let mut cmd = Command::new("sh"); - let mut cflags = OsString::new(); - for arg in compiler.args() { - cflags.push(arg); - cflags.push(" "); - } - - // Can't run ./configure directly on msys2 b/c we're handing in - // Windows-style paths (those starting with C:\), but it chokes on those. - // For that reason we build up a shell script with paths converted to - // posix versions hopefully... - // - // Also apparently the buildbots choke unless we manually set LD, who knows - // why?! - cmd.env("CC", compiler.path()) - .env("CFLAGS", cflags) - .env("LD", &which("ld").unwrap()) - .env("VERBOSE", "1") - .current_dir(&dst.join("build")) - .arg(msys_compatible(&src.join("curl/configure"))); - - // For now this build script doesn't support paths with spaces in them. 
This - // is arguably a but in curl's configure script, but we could also try to - // paper over it by using a tmp directory which *doesn't* have spaces in it. - // As of now though that's not implemented so just give a nicer error for - // the time being. - let wants_space_error = windows && - (dst.to_str().map(|s| s.contains(" ")).unwrap_or(false) || - src.to_str().map(|s| s.contains(" ")).unwrap_or(false)); - if wants_space_error { - panic!("\n\nunfortunately ./configure of libcurl is known to \ - fail if there's a space in the path to the current \ - directory\n\n\ - there's a space in either\n {}\n {}\nand this will cause the \ - build to fail\n\n\ - the MSVC build should work with a directory that has \ - spaces in it, and it would also work to move this to a \ - different directory without spaces\n\n", - src.display(), dst.display()) - } - - if windows { - cmd.arg("--with-winssl"); - } else { - cmd.arg("--without-ca-bundle"); - cmd.arg("--without-ca-path"); - } - if let Some(root) = openssl_root { - cmd.arg(format!("--with-ssl={}", msys_compatible(&root))); - } - if let Some(root) = zlib_root { - cmd.arg(format!("--with-zlib={}", msys_compatible(&root))); - } - cmd.arg("--enable-static=yes"); - cmd.arg("--enable-shared=no"); - match &env::var("PROFILE").unwrap()[..] 
{ - "bench" | "release" => { - cmd.arg("--enable-optimize"); - } - _ => { - cmd.arg("--enable-debug"); - cmd.arg("--disable-optimize"); - } - } - cmd.arg(format!("--prefix={}", msys_compatible(&dst))); - - if target != host && - (!target.contains("windows") || !host.contains("windows")) { - cmd.arg(format!("--host={}", host)); - cmd.arg(format!("--target={}", target)); - } - - cmd.arg("--without-librtmp"); - cmd.arg("--without-libidn"); - cmd.arg("--without-libssh2"); - cmd.arg("--without-nghttp2"); - cmd.arg("--disable-ldap"); - cmd.arg("--disable-ldaps"); - cmd.arg("--disable-ftp"); - cmd.arg("--disable-rtsp"); - cmd.arg("--disable-dict"); - cmd.arg("--disable-telnet"); - cmd.arg("--disable-tftp"); - cmd.arg("--disable-pop3"); - cmd.arg("--disable-imap"); - cmd.arg("--disable-smtp"); - cmd.arg("--disable-gopher"); - cmd.arg("--disable-manual"); - cmd.arg("--disable-smb"); - cmd.arg("--disable-sspi"); - - run(&mut cmd, "sh"); - run(Command::new(make()) - .arg(&format!("-j{}", env::var("NUM_JOBS").unwrap())) - .current_dir(&dst.join("build")), "make"); - run(Command::new(make()) - .arg("install") - .current_dir(&dst.join("build")), "make"); -} - -fn run(cmd: &mut Command, program: &str) { - println!("running: {:?}", cmd); - let status = match cmd.status() { - Ok(status) => status, - Err(ref e) if e.kind() == ErrorKind::NotFound => { - fail(&format!("failed to execute command: {}\nis `{}` not installed?", - e, program)); - } - Err(e) => fail(&format!("failed to execute command: {}", e)), - }; - if !status.success() { - fail(&format!("command did not execute successfully, got: {}", status)); - } -} - -fn fail(s: &str) -> ! 
{ - panic!("\n{}\n\nbuild script failed, must exit now", s) -} - -fn make() -> &'static str { - if cfg!(target_os = "freebsd") {"gmake"} else {"make"} -} - -fn which(cmd: &str) -> Option { - let cmd = format!("{}{}", cmd, env::consts::EXE_SUFFIX); - let paths = env::var_os("PATH").unwrap(); - env::split_paths(&paths).map(|p| p.join(&cmd)).find(|p| { - fs::metadata(p).is_ok() - }) -} - -fn msys_compatible(path: &Path) -> String { - let path = path.to_str().unwrap(); - if !cfg!(windows) { - return path.to_string() - } - path.replace("C:\\", "/c/") - .replace("\\", "/") -} - -fn register_dep(dep: &str) -> Option { - if let Some(s) = env::var_os(&format!("DEP_{}_ROOT", dep)) { - prepend("PKG_CONFIG_PATH", Path::new(&s).join("lib/pkgconfig")); - return Some(s.into()) - } - if let Some(s) = env::var_os(&format!("DEP_{}_INCLUDE", dep)) { - let root = Path::new(&s).parent().unwrap(); - env::set_var(&format!("DEP_{}_ROOT", dep), root); - let path = root.join("lib/pkgconfig"); - if path.exists() { - prepend("PKG_CONFIG_PATH", path); - return Some(root.to_path_buf()) - } - } - - return None; - - fn prepend(var: &str, val: PathBuf) { - let prefix = env::var(var).unwrap_or(String::new()); - let mut v = vec![val]; - v.extend(env::split_paths(&prefix)); - env::set_var(var, &env::join_paths(v).unwrap()); - } -} - -fn build_msvc(target: &str) { - let cmd = gcc::windows_registry::find(target, "nmake.exe"); - let mut cmd = cmd.unwrap_or(Command::new("nmake.exe")); - let src = env::current_dir().unwrap(); - let dst = PathBuf::from(env::var_os("OUT_DIR").unwrap()); - let machine = if target.starts_with("x86_64") { - "x64" - } else if target.starts_with("i686") { - "x86" - } else { - panic!("unknown msvc target: {}", target); - }; - - t!(fs::create_dir_all(dst.join("include/curl"))); - t!(fs::create_dir_all(dst.join("lib"))); - - cmd.current_dir(src.join("curl/winbuild")); - cmd.arg("/f").arg("Makefile.vc") - .arg("MODE=static") - .arg("ENABLE_IDN=yes") - .arg("DEBUG=no") - 
.arg("GEN_PDB=no") - .arg("ENABLE_WINSSL=yes") - .arg("ENABLE_SSPI=yes") - .arg(format!("MACHINE={}", machine)); - - let features = env::var("CARGO_CFG_TARGET_FEATURE") - .unwrap_or(String::new()); - if features.contains("crt-static") { - cmd.arg("RTLIBCFG=static"); - } - - if let Some(inc) = env::var_os("DEP_Z_ROOT") { - let inc = PathBuf::from(inc); - let mut s = OsString::from("WITH_DEVEL="); - s.push(&inc); - cmd.arg("WITH_ZLIB=static").arg(s); - - // the build system for curl expects this library to be called - // zlib_a.lib, so make sure it's named correctly (where libz-sys just - // produces zlib.lib) - let _ = fs::remove_file(&inc.join("lib/zlib_a.lib")); - t!(fs::hard_link(inc.join("lib/zlib.lib"), inc.join("lib/zlib_a.lib"))); - } - run(&mut cmd, "nmake"); - - let name = format!("libcurl-vc-{}-release-static-zlib-static-\ - ipv6-sspi-winssl", machine); - let libs = src.join("curl/builds").join(name); - - t!(fs::copy(libs.join("lib/libcurl_a.lib"), dst.join("lib/curl.lib"))); - for f in t!(fs::read_dir(libs.join("include/curl"))) { - let path = t!(f).path(); - let dst = dst.join("include/curl").join(path.file_name().unwrap()); - t!(fs::copy(path, dst)); - } - t!(fs::remove_dir_all(src.join("curl/builds"))); - println!("cargo:rustc-link-lib=wldap32"); - println!("cargo:rustc-link-lib=advapi32"); - println!("cargo:rustc-link-lib=normaliz"); -} diff -Nru cargo-0.17.0/vendor/curl-sys-0.3.6/.cargo-checksum.json cargo-0.19.0/vendor/curl-sys-0.3.6/.cargo-checksum.json --- cargo-0.17.0/vendor/curl-sys-0.3.6/.cargo-checksum.json 2017-03-24 16:59:55.000000000 +0000 +++ cargo-0.19.0/vendor/curl-sys-0.3.6/.cargo-checksum.json 1970-01-01 00:00:00.000000000 +0000 @@ -1 +0,0 @@ -{"files": {"build.rs": "ff2b0f5d843d6fdc242f05f2b80d027d7147a6e661ea6e6dd9c440febad00f9d", "Cargo.toml": "fa26a7c053b489bbf347d17d59658fee2295914c71e35cf7beb73c0157eac74f", ".cargo-ok": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", "lib.rs": 
"bd481920373ac19d4995b4300bd5f56a9dcdf812aa285831800b5b636c22d2cc"}, "package": "218a149208e1f4e5f7e20f1d0ed1e9431a086a6b4333ff95dba82237be9c283a"} \ No newline at end of file diff -Nru cargo-0.17.0/vendor/curl-sys-0.3.6/Cargo.toml cargo-0.19.0/vendor/curl-sys-0.3.6/Cargo.toml --- cargo-0.17.0/vendor/curl-sys-0.3.6/Cargo.toml 2017-03-24 16:59:54.000000000 +0000 +++ cargo-0.19.0/vendor/curl-sys-0.3.6/Cargo.toml 1970-01-01 00:00:00.000000000 +0000 @@ -1,28 +0,0 @@ -[package] -name = "curl-sys" -version = "0.3.6" -authors = ["Carl Lerche ", - "Alex Crichton "] -links = "curl" -build = "build.rs" -license = "MIT" -repository = "https://github.com/carllerche/curl-rust" -description = "Native bindings to the libcurl library" - -[build-dependencies] -pkg-config = "0.3" -gcc = "0.3.10" - -[lib] -name = "curl_sys" -path = "lib.rs" - -[dependencies] -libz-sys = ">= 0" -libc = "0.2" - -[target."cfg(all(unix, not(target_os = \"macos\")))".dependencies] -openssl-sys = "0.9" - -[target."cfg(windows)".dependencies] -winapi = "0.2" diff -Nru cargo-0.17.0/vendor/curl-sys-0.3.6/lib.rs cargo-0.19.0/vendor/curl-sys-0.3.6/lib.rs --- cargo-0.17.0/vendor/curl-sys-0.3.6/lib.rs 2017-03-24 16:59:55.000000000 +0000 +++ cargo-0.19.0/vendor/curl-sys-0.3.6/lib.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,1017 +0,0 @@ -#![allow(bad_style)] - -extern crate libc; -#[cfg(not(target_env = "msvc"))] -extern crate libz_sys; -#[cfg(all(unix, not(target_os = "macos")))] -extern crate openssl_sys; -#[cfg(windows)] -extern crate winapi; - -use libc::{c_int, c_char, c_uint, c_long, c_double, c_void, size_t, time_t}; -use libc::c_ulong; - -#[cfg(unix)] -use libc::fd_set; -#[cfg(windows)] -use winapi::fd_set; - -#[cfg(target_env = "msvc")] -#[doc(hidden)] -pub type __enum_ty = libc::c_int; -#[cfg(not(target_env = "msvc"))] -#[doc(hidden)] -pub type __enum_ty = libc::c_uint; - -pub type CURLINFO = __enum_ty; -pub type CURLoption = __enum_ty; -pub type CURLcode = __enum_ty; -pub type CURLversion = __enum_ty; 
-pub type curl_off_t = i64; - -pub enum CURL {} - -#[cfg(unix)] -pub type curl_socket_t = libc::c_int; -#[cfg(unix)] -pub const CURL_SOCKET_BAD: curl_socket_t = -1; -#[cfg(all(windows, target_pointer_width = "32"))] -pub type curl_socket_t = libc::c_uint; -#[cfg(all(windows, target_pointer_width = "64"))] -pub type curl_socket_t = u64; -#[cfg(windows)] -pub const CURL_SOCKET_BAD: curl_socket_t = !0; - -pub enum curl_httppost { - // Note that this changed in some versions of libcurl, so we currently don't - // bind the fields as they're apparently not stable. - // pub next: *mut curl_httppost, - // pub name: *mut c_char, - // pub namelength: c_long, - // pub contents: *mut c_char, - // pub contentslength: c_long, - // pub buffer: *mut c_char, - // pub bufferlength: c_long, - // pub contenttype: *mut c_char, - // pub contentheader: *mut curl_slist, - // pub more: *mut curl_httppost, - // pub flags: c_long, - // pub showfilename: *mut c_char, - // pub userp: *mut c_void, -} - -// pub const HTTPPOST_FILENAME: c_long = 1 << 0; -// pub const HTTPPOST_READFILE: c_long = 1 << 1; -// pub const HTTPPOST_PTRNAME: c_long = 1 << 2; -// pub const HTTPPOST_PTRCONTENTS: c_long = 1 << 3; -// pub const HTTPPOST_BUFFER: c_long = 1 << 4; -// pub const HTTPPOST_PTRBUFFER: c_long = 1 << 5; -// pub const HTTPPOST_CALLBACK: c_long = 1 << 6; - -pub type curl_progress_callback = extern fn(*mut c_void, - c_double, - c_double, - c_double, - c_double) -> c_int; -// pub type curl_xferinfo_callback = extern fn(*mut c_void, -// curl_off_t, -// curl_off_t, -// curl_off_t, -// curl_off_t) -> c_int; - -pub const CURL_WRITEFUNC_PAUSE: size_t = 0x10000001; - -pub type curl_write_callback = extern fn(*mut c_char, - size_t, - size_t, - *mut c_void) -> size_t; - -pub type curlfiletype = __enum_ty; -pub const CURLFILETYPE_FILE: curlfiletype = 0; -pub const CURLFILETYPE_DIRECTORY: curlfiletype = 1; -pub const CURLFILETYPE_SYMLINK: curlfiletype = 2; -pub const CURLFILETYPE_DEVICE_BLOCK: curlfiletype = 3; 
-pub const CURLFILETYPE_DEVICE_CHAR: curlfiletype = 4; -pub const CURLFILETYPE_NAMEDPIPE: curlfiletype = 5; -pub const CURLFILETYPE_SOCKET: curlfiletype = 6; -pub const CURLFILETYPE_DOOR: curlfiletype = 7; -pub const CURLFILETYPE_UNKNOWN: curlfiletype = 8; - -pub const CURLFINFOFLAG_KNOWN_FILENAME: c_uint = 1 << 0; -pub const CURLFINFOFLAG_KNOWN_FILETYPE: c_uint = 1 << 1; -pub const CURLFINFOFLAG_KNOWN_TIME: c_uint = 1 << 2; -pub const CURLFINFOFLAG_KNOWN_PERM: c_uint = 1 << 3; -pub const CURLFINFOFLAG_KNOWN_UID: c_uint = 1 << 4; -pub const CURLFINFOFLAG_KNOWN_GID: c_uint = 1 << 5; -pub const CURLFINFOFLAG_KNOWN_SIZE: c_uint = 1 << 6; -pub const CURLFINFOFLAG_KNOWN_HLINKCOUNT: c_uint = 1 << 7; - -#[repr(C)] -pub struct curl_fileinfo { - pub filename: *mut c_char, - pub filetype: curlfiletype, - pub time: time_t, - pub perm: c_uint, - pub uid: c_int, - pub gid: c_int, - pub size: curl_off_t, - pub hardlinks: c_long, - - pub strings_time: *mut c_char, - pub strings_perm: *mut c_char, - pub strings_user: *mut c_char, - pub strings_group: *mut c_char, - pub strings_target: *mut c_char, - - pub flags: c_uint, - pub b_data: *mut c_char, - pub b_size: size_t, - pub b_used: size_t, -} - -pub const CURL_CHUNK_BGN_FUNC_OK: c_long = 0; -pub const CURL_CHUNK_BGN_FUNC_FAIL: c_long = 1; -pub const CURL_CHUNK_BGN_FUNC_SKIP: c_long = 2; -pub type curl_chunk_bgn_callback = extern fn(*const c_void, - *mut c_void, - c_int) -> c_long; - -pub const CURL_CHUNK_END_FUNC_OK: c_long = 0; -pub const CURL_CHUNK_END_FUNC_FAIL: c_long = 1; -pub type curl_chunk_end_callback = extern fn(*mut c_void) -> c_long; - -pub const CURL_FNMATCHFUNC_MATCH: c_int = 0; -pub const CURL_FNMATCHFUNC_NOMATCH: c_int = 1; -pub const CURL_FNMATCHFUNC_FAIL: c_int = 2; -pub type curl_fnmatch_callback = extern fn(*mut c_void, - *const c_char, - *const c_char) -> c_int; - -pub const CURL_SEEKFUNC_OK: c_int = 0; -pub const CURL_SEEKFUNC_FAIL: c_int = 1; -pub const CURL_SEEKFUNC_CANTSEEK: c_int = 2; -pub type 
curl_seek_callback = extern fn(*mut c_void, - curl_off_t, - c_int) -> c_int; - -pub const CURL_READFUNC_ABORT: size_t = 0x10000000; -pub const CURL_READFUNC_PAUSE: size_t = 0x10000001; -pub type curl_read_callback = extern fn(*mut c_char, - size_t, - size_t, - *mut c_void) -> size_t; - -// pub type curlsocktype = __enum_ty; -// pub const CURLSOCKTYPE_IPCXN: curlsocktype = 0; -// pub const CURLSOCKTYPE_ACCEPT: curlsocktype = 1; -// pub const CURL_SOCKOPT_OK: c_int = 0; -// pub const CURL_SOCKOPT_ERROR: c_int = 1; -// pub const CURL_SOCKOPT_ALREADY_CONNECTED: c_int = 2; -// pub type curl_sockopt_callback = extern fn(*mut c_void, -// curl_socket_t, -// curlsocktype) -> c_int; - -// TODO: sort out libc::sockaddr on windows -// #[repr(C)] -// pub struct curl_sockaddr { -// pub family: c_int, -// pub socktype: c_int, -// pub protocol: c_int, -// pub addrlen: c_uint, -// pub addr: libc::sockaddr, -// } -// -// pub type curl_opensocket_callback = extern fn(*mut c_void, -// curlsocktype, -// *mut curl_sockaddr) -> curl_socket_t; - -pub type curlioerr = __enum_ty; -pub const CURLIOE_OK: curlioerr = 0; -pub const CURLIOE_UNKNOWNCMD: curlioerr = 1; -pub const CURLIOE_FAILRESTART: curlioerr = 2; - -pub type curliocmd = __enum_ty; -pub const CURLIOCMD_NOP: curliocmd = 0; -pub const CURLIOCMD_RESTARTREAD: curliocmd = 1; - -pub type curl_ioctl_callback = extern fn(*mut CURL, c_int, *mut c_void) -> curlioerr; - -pub type curl_malloc_callback = extern fn(size_t) -> *mut c_void; -pub type curl_free_callback = extern fn(*mut c_void); -pub type curl_realloc_callback = extern fn(*mut c_void, size_t) -> *mut c_void; -pub type curl_strdup_callback = extern fn(*const c_char) -> *mut c_char; -pub type curl_calloc_callback = extern fn(size_t, size_t) -> *mut c_void; - -pub type curl_infotype = __enum_ty; -pub const CURLINFO_TEXT: curl_infotype = 0; -pub const CURLINFO_HEADER_IN: curl_infotype = 1; -pub const CURLINFO_HEADER_OUT: curl_infotype = 2; -pub const CURLINFO_DATA_IN: curl_infotype = 
3; -pub const CURLINFO_DATA_OUT: curl_infotype = 4; -pub const CURLINFO_SSL_DATA_IN: curl_infotype = 5; -pub const CURLINFO_SSL_DATA_OUT: curl_infotype = 6; - -pub type curl_debug_callback = extern fn(*mut CURL, - curl_infotype, - *mut c_char, - size_t, - *mut c_void) -> c_int; - -pub const CURLE_OK: CURLcode = 0; -pub const CURLE_UNSUPPORTED_PROTOCOL: CURLcode = 1; -pub const CURLE_FAILED_INIT: CURLcode = 2; -pub const CURLE_URL_MALFORMAT: CURLcode = 3; -// pub const CURLE_NOT_BUILT_IN: CURLcode = 4; -pub const CURLE_COULDNT_RESOLVE_PROXY: CURLcode = 5; -pub const CURLE_COULDNT_RESOLVE_HOST: CURLcode = 6; -pub const CURLE_COULDNT_CONNECT: CURLcode = 7; -pub const CURLE_FTP_WEIRD_SERVER_REPLY: CURLcode = 8; -pub const CURLE_REMOTE_ACCESS_DENIED: CURLcode = 9; -// pub const CURLE_FTP_ACCEPT_FAILED: CURLcode = 10; -pub const CURLE_FTP_WEIRD_PASS_REPLY: CURLcode = 11; -// pub const CURLE_FTP_ACCEPT_TIMEOUT: CURLcode = 12; -pub const CURLE_FTP_WEIRD_PASV_REPLY: CURLcode = 13; -pub const CURLE_FTP_WEIRD_227_FORMAT: CURLcode = 14; -pub const CURLE_FTP_CANT_GET_HOST: CURLcode = 15; -pub const CURLE_OBSOLETE16: CURLcode = 16; -pub const CURLE_FTP_COULDNT_SET_TYPE: CURLcode = 17; -pub const CURLE_PARTIAL_FILE: CURLcode = 18; -pub const CURLE_FTP_COULDNT_RETR_FILE: CURLcode = 19; -pub const CURLE_OBSOLETE20: CURLcode = 20; -pub const CURLE_QUOTE_ERROR: CURLcode = 21; -pub const CURLE_HTTP_RETURNED_ERROR: CURLcode = 22; -pub const CURLE_WRITE_ERROR: CURLcode = 23; -pub const CURLE_OBSOLETE24: CURLcode = 24; -pub const CURLE_UPLOAD_FAILED: CURLcode = 25; -pub const CURLE_READ_ERROR: CURLcode = 26; -pub const CURLE_OUT_OF_MEMORY: CURLcode = 27; -pub const CURLE_OPERATION_TIMEDOUT: CURLcode = 28; -pub const CURLE_OBSOLETE29: CURLcode = 29; -pub const CURLE_FTP_PORT_FAILED: CURLcode = 30; -pub const CURLE_FTP_COULDNT_USE_REST: CURLcode = 31; -pub const CURLE_OBSOLETE32: CURLcode = 32; -pub const CURLE_RANGE_ERROR: CURLcode = 33; -pub const CURLE_HTTP_POST_ERROR: CURLcode = 34; 
-pub const CURLE_SSL_CONNECT_ERROR: CURLcode = 35; -pub const CURLE_BAD_DOWNLOAD_RESUME: CURLcode = 36; -pub const CURLE_FILE_COULDNT_READ_FILE: CURLcode = 37; -pub const CURLE_LDAP_CANNOT_BIND: CURLcode = 38; -pub const CURLE_LDAP_SEARCH_FAILED: CURLcode = 39; -pub const CURLE_OBSOLETE40: CURLcode = 40; -pub const CURLE_FUNCTION_NOT_FOUND: CURLcode = 41; -pub const CURLE_ABORTED_BY_CALLBACK: CURLcode = 42; -pub const CURLE_BAD_FUNCTION_ARGUMENT: CURLcode = 43; -pub const CURLE_OBSOLETE44: CURLcode = 44; -pub const CURLE_INTERFACE_FAILED: CURLcode = 45; -pub const CURLE_OBSOLETE46: CURLcode = 46; -pub const CURLE_TOO_MANY_REDIRECTS : CURLcode = 47; -pub const CURLE_UNKNOWN_OPTION: CURLcode = 48; -pub const CURLE_TELNET_OPTION_SYNTAX : CURLcode = 49; -pub const CURLE_OBSOLETE50: CURLcode = 50; -pub const CURLE_PEER_FAILED_VERIFICATION: CURLcode = 51; -pub const CURLE_GOT_NOTHING: CURLcode = 52; -pub const CURLE_SSL_ENGINE_NOTFOUND: CURLcode = 53; -pub const CURLE_SSL_ENGINE_SETFAILED: CURLcode = 54; -pub const CURLE_SEND_ERROR: CURLcode = 55; -pub const CURLE_RECV_ERROR: CURLcode = 56; -pub const CURLE_OBSOLETE57: CURLcode = 57; -pub const CURLE_SSL_CERTPROBLEM: CURLcode = 58; -pub const CURLE_SSL_CIPHER: CURLcode = 59; -pub const CURLE_SSL_CACERT: CURLcode = 60; -pub const CURLE_BAD_CONTENT_ENCODING: CURLcode = 61; -pub const CURLE_LDAP_INVALID_URL: CURLcode = 62; -pub const CURLE_FILESIZE_EXCEEDED: CURLcode = 63; -pub const CURLE_USE_SSL_FAILED: CURLcode = 64; -pub const CURLE_SEND_FAIL_REWIND: CURLcode = 65; -pub const CURLE_SSL_ENGINE_INITFAILED: CURLcode = 66; -pub const CURLE_LOGIN_DENIED: CURLcode = 67; -pub const CURLE_TFTP_NOTFOUND: CURLcode = 68; -pub const CURLE_TFTP_PERM: CURLcode = 69; -pub const CURLE_REMOTE_DISK_FULL: CURLcode = 70; -pub const CURLE_TFTP_ILLEGAL: CURLcode = 71; -pub const CURLE_TFTP_UNKNOWNID: CURLcode = 72; -pub const CURLE_REMOTE_FILE_EXISTS: CURLcode = 73; -pub const CURLE_TFTP_NOSUCHUSER: CURLcode = 74; -pub const 
CURLE_CONV_FAILED: CURLcode = 75; -pub const CURLE_CONV_REQD: CURLcode = 76; -pub const CURLE_SSL_CACERT_BADFILE: CURLcode = 77; -pub const CURLE_REMOTE_FILE_NOT_FOUND: CURLcode = 78; -pub const CURLE_SSH: CURLcode = 79; -pub const CURLE_SSL_SHUTDOWN_FAILED: CURLcode = 80; -pub const CURLE_AGAIN: CURLcode = 81; -pub const CURLE_SSL_CRL_BADFILE: CURLcode = 82; -pub const CURLE_SSL_ISSUER_ERROR: CURLcode = 83; -pub const CURLE_FTP_PRET_FAILED: CURLcode = 84; -pub const CURLE_RTSP_CSEQ_ERROR: CURLcode = 85; -pub const CURLE_RTSP_SESSION_ERROR: CURLcode = 86; -pub const CURLE_FTP_BAD_FILE_LIST: CURLcode = 87; -pub const CURLE_CHUNK_FAILED: CURLcode = 88; -// pub const CURLE_NO_CONNECTION_AVAILABLE: CURLcode = 89; - -pub type curl_conv_callback = extern fn(*mut c_char, size_t) -> CURLcode; -pub type curl_ssl_ctx_callback = extern fn(*mut CURL, - *mut c_void, - *mut c_void) -> CURLcode; - -pub type curl_proxytype = __enum_ty; -pub const CURLPROXY_HTTP: curl_proxytype = 0; -pub const CURLPROXY_HTTP_1_0: curl_proxytype = 1; -pub const CURLPROXY_SOCKS4: curl_proxytype = 4; -pub const CURLPROXY_SOCKS5: curl_proxytype = 5; -pub const CURLPROXY_SOCKS4A: curl_proxytype = 6; -pub const CURLPROXY_SOCKS5_HOSTNAME: curl_proxytype = 7; - -pub const CURLAUTH_NONE: c_ulong = 0; -pub const CURLAUTH_BASIC: c_ulong = 1 << 0; -pub const CURLAUTH_DIGEST: c_ulong = 1 << 1; -pub const CURLAUTH_GSSNEGOTIATE: c_ulong = 1 << 2; -pub const CURLAUTH_NTLM: c_ulong = 1 << 3; -pub const CURLAUTH_DIGEST_IE: c_ulong = 1 << 4; -pub const CURLAUTH_NTLM_WB: c_ulong = 1 << 5; -// pub const CURLAUTH_ONLY: c_ulong = 1 << 31; -pub const CURLAUTH_ANY: c_ulong = !CURLAUTH_DIGEST_IE; -pub const CURLAUTH_ANYSAFE: c_ulong = !(CURLAUTH_BASIC | CURLAUTH_DIGEST_IE); - -// pub const CURLSSH_AUTH_ANY: c_ulong = !0; -// pub const CURLSSH_AUTH_NONE: c_ulong = 0; -// pub const CURLSSH_AUTH_PUBLICKEY: c_ulong = 1 << 0; -// pub const CURLSSH_AUTH_PASSWORD: c_ulong = 1 << 1; -// pub const CURLSSH_AUTH_HOST: c_ulong = 1 << 
2; -// pub const CURLSSH_AUTH_KEYBOARD: c_ulong = 1 << 3; -// pub const CURLSSH_AUTH_AGENT: c_ulong = 1 << 4; -// pub const CURLSSH_AUTH_DEFAULT: c_ulong = CURLSSH_AUTH_ANY; - -pub const CURLGSSAPI_DELEGATION_NONE: c_ulong = 0; -pub const CURLGSSAPI_DELEGATION_POLICY_FLAG: c_ulong = 1 << 0; -pub const CURLGSSAPI_DELEGATION_FLAG: c_ulong = 1 << 1; - -// pub type curl_khtype = __enum_ty; -// pub const CURLKHTYPE_UNKNOWN: curl_khtype = 0; -// pub const CURLKHTYPE_RSA1: curl_khtype = 1; -// pub const CURLKHTYPE_RSA: curl_khtype = 2; -// pub const CURLKHTYPE_DSS: curl_khtype = 3; - -// #[repr(C)] -// pub struct curl_khkey { -// pub key: *const c_char, -// pub len: size_t, -// pub keytype: curl_khtype, -// } - -// pub type curl_khstat = __enum_ty; -// pub const CURLKHSTAT_FINE_ADD_TO_FILE: curl_khstat = 0; -// pub const CURLKHSTAT_FINE: curl_khstat = 1; -// pub const CURLKHSTAT_REJECT: curl_khstat = 2; -// pub const CURLKHSTAT_DEFER: curl_khstat = 3; -// -// pub type curl_khmatch = __enum_ty; -// pub const CURLKHMATCH_OK: curl_khmatch = 0; -// pub const CURLKHMATCH_MISMATCH: curl_khmatch = 1; -// pub const CURLKHMATCH_MISSING: curl_khmatch = 2; - -// pub type curl_sshkeycallback = extern fn(*mut CURL, -// *const curl_khkey, -// *const curl_khkey, -// curl_khmatch, -// *mut c_void) -> c_int; - -pub type curl_usessl = __enum_ty; -pub const CURLUSESSL_NONE: curl_usessl = 0; -pub const CURLUSESSL_TRY: curl_usessl = 1; -pub const CURLUSESSL_CONTROL: curl_usessl = 2; -pub const CURLUSESSL_ALL: curl_usessl = 3; - -pub const CURLPROTO_HTTP: c_int = 1 << 0; -pub const CURLPROTO_HTTPS: c_int = 1 << 1; -pub const CURLPROTO_FILE: c_int = 1 << 10; - -pub const CURLOPTTYPE_LONG: CURLoption = 0; -pub const CURLOPTTYPE_OBJECTPOINT: CURLoption = 10_000; -pub const CURLOPTTYPE_FUNCTIONPOINT: CURLoption = 20_000; -pub const CURLOPTTYPE_OFF_T: CURLoption = 30_000; - -pub const CURLOPT_FILE: CURLoption = CURLOPTTYPE_OBJECTPOINT + 1; -pub const CURLOPT_URL: CURLoption = 
CURLOPTTYPE_OBJECTPOINT + 2; -pub const CURLOPT_PORT: CURLoption = CURLOPTTYPE_LONG + 3; -pub const CURLOPT_PROXY: CURLoption = CURLOPTTYPE_OBJECTPOINT + 4; -pub const CURLOPT_USERPWD: CURLoption = CURLOPTTYPE_OBJECTPOINT + 5; -pub const CURLOPT_PROXYUSERPWD: CURLoption = CURLOPTTYPE_OBJECTPOINT + 6; -pub const CURLOPT_RANGE: CURLoption = CURLOPTTYPE_OBJECTPOINT + 7; -pub const CURLOPT_INFILE: CURLoption = CURLOPTTYPE_OBJECTPOINT + 9; -pub const CURLOPT_ERRORBUFFER: CURLoption = CURLOPTTYPE_OBJECTPOINT + 10; -pub const CURLOPT_WRITEFUNCTION: CURLoption = CURLOPTTYPE_FUNCTIONPOINT + 11; -pub const CURLOPT_READFUNCTION: CURLoption = CURLOPTTYPE_FUNCTIONPOINT + 12; -pub const CURLOPT_TIMEOUT: CURLoption = CURLOPTTYPE_LONG + 13; -pub const CURLOPT_INFILESIZE: CURLoption = CURLOPTTYPE_LONG + 14; -pub const CURLOPT_POSTFIELDS: CURLoption = CURLOPTTYPE_OBJECTPOINT + 15; -pub const CURLOPT_REFERER: CURLoption = CURLOPTTYPE_OBJECTPOINT + 16; -pub const CURLOPT_FTPPORT: CURLoption = CURLOPTTYPE_OBJECTPOINT + 17; -pub const CURLOPT_USERAGENT: CURLoption = CURLOPTTYPE_OBJECTPOINT + 18; -pub const CURLOPT_LOW_SPEED_LIMIT: CURLoption = CURLOPTTYPE_LONG + 19; -pub const CURLOPT_LOW_SPEED_TIME: CURLoption = CURLOPTTYPE_LONG + 20; -pub const CURLOPT_RESUME_FROM: CURLoption = CURLOPTTYPE_LONG + 21; -pub const CURLOPT_COOKIE: CURLoption = CURLOPTTYPE_OBJECTPOINT + 22; -pub const CURLOPT_HTTPHEADER: CURLoption = CURLOPTTYPE_OBJECTPOINT + 23; -pub const CURLOPT_HTTPPOST: CURLoption = CURLOPTTYPE_OBJECTPOINT + 24; -pub const CURLOPT_SSLCERT: CURLoption = CURLOPTTYPE_OBJECTPOINT + 25; -pub const CURLOPT_KEYPASSWD: CURLoption = CURLOPTTYPE_OBJECTPOINT + 26; -pub const CURLOPT_CRLF: CURLoption = CURLOPTTYPE_LONG + 27; -pub const CURLOPT_QUOTE: CURLoption = CURLOPTTYPE_OBJECTPOINT + 28; -pub const CURLOPT_WRITEHEADER: CURLoption = CURLOPTTYPE_OBJECTPOINT + 29; -pub const CURLOPT_COOKIEFILE: CURLoption = CURLOPTTYPE_OBJECTPOINT + 31; -pub const CURLOPT_SSLVERSION: CURLoption = 
CURLOPTTYPE_LONG + 32; -pub const CURLOPT_TIMECONDITION: CURLoption = CURLOPTTYPE_LONG + 33; -pub const CURLOPT_TIMEVALUE: CURLoption = CURLOPTTYPE_LONG + 34; -pub const CURLOPT_CUSTOMREQUEST: CURLoption = CURLOPTTYPE_OBJECTPOINT + 36; -pub const CURLOPT_STDERR: CURLoption = CURLOPTTYPE_OBJECTPOINT + 37; -pub const CURLOPT_POSTQUOTE: CURLoption = CURLOPTTYPE_OBJECTPOINT + 39; -pub const CURLOPT_WRITEINFO: CURLoption = CURLOPTTYPE_OBJECTPOINT + 40; -pub const CURLOPT_VERBOSE: CURLoption = CURLOPTTYPE_LONG + 41; -pub const CURLOPT_HEADER: CURLoption = CURLOPTTYPE_LONG + 42; -pub const CURLOPT_NOPROGRESS: CURLoption = CURLOPTTYPE_LONG + 43; -pub const CURLOPT_NOBODY: CURLoption = CURLOPTTYPE_LONG + 44; -pub const CURLOPT_FAILONERROR: CURLoption = CURLOPTTYPE_LONG + 45; -pub const CURLOPT_UPLOAD: CURLoption = CURLOPTTYPE_LONG + 46; -pub const CURLOPT_POST: CURLoption = CURLOPTTYPE_LONG + 47; -pub const CURLOPT_DIRLISTONLY: CURLoption = CURLOPTTYPE_LONG + 48; -pub const CURLOPT_APPEND: CURLoption = CURLOPTTYPE_LONG + 50; -pub const CURLOPT_NETRC: CURLoption = CURLOPTTYPE_LONG + 51; -pub const CURLOPT_FOLLOWLOCATION: CURLoption = CURLOPTTYPE_LONG + 52; -pub const CURLOPT_TRANSFERTEXT: CURLoption = CURLOPTTYPE_LONG + 53; -pub const CURLOPT_PUT: CURLoption = CURLOPTTYPE_LONG + 54; -pub const CURLOPT_PROGRESSFUNCTION: CURLoption = CURLOPTTYPE_FUNCTIONPOINT + 56; -pub const CURLOPT_PROGRESSDATA: CURLoption = CURLOPTTYPE_OBJECTPOINT + 57; -pub const CURLOPT_AUTOREFERER: CURLoption = CURLOPTTYPE_LONG + 58; -pub const CURLOPT_PROXYPORT: CURLoption = CURLOPTTYPE_LONG + 59; -pub const CURLOPT_POSTFIELDSIZE: CURLoption = CURLOPTTYPE_LONG + 60; -pub const CURLOPT_HTTPPROXYTUNNEL: CURLoption = CURLOPTTYPE_LONG + 61; -pub const CURLOPT_INTERFACE: CURLoption = CURLOPTTYPE_OBJECTPOINT + 62; -pub const CURLOPT_KRBLEVEL: CURLoption = CURLOPTTYPE_OBJECTPOINT + 63; -pub const CURLOPT_SSL_VERIFYPEER: CURLoption = CURLOPTTYPE_LONG + 64; -pub const CURLOPT_CAINFO: CURLoption = 
CURLOPTTYPE_OBJECTPOINT + 65; -pub const CURLOPT_MAXREDIRS: CURLoption = CURLOPTTYPE_LONG + 68; -pub const CURLOPT_FILETIME: CURLoption = CURLOPTTYPE_LONG + 69; -pub const CURLOPT_TELNETOPTIONS: CURLoption = CURLOPTTYPE_OBJECTPOINT + 70; -pub const CURLOPT_MAXCONNECTS: CURLoption = CURLOPTTYPE_LONG + 71; -pub const CURLOPT_CLOSEPOLICY: CURLoption = CURLOPTTYPE_LONG + 72; -pub const CURLOPT_FRESH_CONNECT: CURLoption = CURLOPTTYPE_LONG + 74; -pub const CURLOPT_FORBID_REUSE: CURLoption = CURLOPTTYPE_LONG + 75; -pub const CURLOPT_RANDOM_FILE: CURLoption = CURLOPTTYPE_OBJECTPOINT + 76; -pub const CURLOPT_EGDSOCKET: CURLoption = CURLOPTTYPE_OBJECTPOINT + 77; -pub const CURLOPT_CONNECTTIMEOUT: CURLoption = CURLOPTTYPE_LONG + 78; -pub const CURLOPT_HEADERFUNCTION: CURLoption = CURLOPTTYPE_FUNCTIONPOINT + 79; -pub const CURLOPT_HTTPGET: CURLoption = CURLOPTTYPE_LONG + 80; -pub const CURLOPT_SSL_VERIFYHOST: CURLoption = CURLOPTTYPE_LONG + 81; -pub const CURLOPT_COOKIEJAR: CURLoption = CURLOPTTYPE_OBJECTPOINT + 82; -pub const CURLOPT_SSL_CIPHER_LIST: CURLoption = CURLOPTTYPE_OBJECTPOINT + 83; -pub const CURLOPT_HTTP_VERSION: CURLoption = CURLOPTTYPE_LONG + 84; -pub const CURLOPT_FTP_USE_EPSV: CURLoption = CURLOPTTYPE_LONG + 85; -pub const CURLOPT_SSLCERTTYPE: CURLoption = CURLOPTTYPE_OBJECTPOINT + 86; -pub const CURLOPT_SSLKEY: CURLoption = CURLOPTTYPE_OBJECTPOINT + 87; -pub const CURLOPT_SSLKEYTYPE: CURLoption = CURLOPTTYPE_OBJECTPOINT + 88; -pub const CURLOPT_SSLENGINE: CURLoption = CURLOPTTYPE_OBJECTPOINT + 89; -pub const CURLOPT_SSLENGINE_DEFAULT: CURLoption = CURLOPTTYPE_LONG + 90; -pub const CURLOPT_DNS_USE_GLOBAL_CACHE: CURLoption = CURLOPTTYPE_LONG + 91; -pub const CURLOPT_DNS_CACHE_TIMEOUT: CURLoption = CURLOPTTYPE_LONG + 92; -pub const CURLOPT_PREQUOTE: CURLoption = CURLOPTTYPE_OBJECTPOINT + 93; -pub const CURLOPT_DEBUGFUNCTION: CURLoption = CURLOPTTYPE_FUNCTIONPOINT + 94; -pub const CURLOPT_DEBUGDATA: CURLoption = CURLOPTTYPE_OBJECTPOINT + 95; -pub const 
CURLOPT_COOKIESESSION: CURLoption = CURLOPTTYPE_LONG + 96; -pub const CURLOPT_CAPATH: CURLoption = CURLOPTTYPE_OBJECTPOINT + 97; -pub const CURLOPT_BUFFERSIZE: CURLoption = CURLOPTTYPE_LONG + 98; -pub const CURLOPT_NOSIGNAL: CURLoption = CURLOPTTYPE_LONG + 99; -pub const CURLOPT_SHARE: CURLoption = CURLOPTTYPE_OBJECTPOINT + 100; -pub const CURLOPT_PROXYTYPE: CURLoption = CURLOPTTYPE_LONG + 101; -pub const CURLOPT_ACCEPT_ENCODING: CURLoption = CURLOPTTYPE_OBJECTPOINT + 102; -pub const CURLOPT_PRIVATE: CURLoption = CURLOPTTYPE_OBJECTPOINT + 103; -pub const CURLOPT_HTTP200ALIASES: CURLoption = CURLOPTTYPE_OBJECTPOINT + 104; -pub const CURLOPT_UNRESTRICTED_AUTH: CURLoption = CURLOPTTYPE_LONG + 105; -pub const CURLOPT_FTP_USE_EPRT: CURLoption = CURLOPTTYPE_LONG + 106; -pub const CURLOPT_HTTPAUTH: CURLoption = CURLOPTTYPE_LONG + 107; -pub const CURLOPT_SSL_CTX_FUNCTION: CURLoption = CURLOPTTYPE_FUNCTIONPOINT + 108; -pub const CURLOPT_SSL_CTX_DATA: CURLoption = CURLOPTTYPE_OBJECTPOINT + 109; -pub const CURLOPT_FTP_CREATE_MISSING_DIRS: CURLoption = CURLOPTTYPE_LONG + 110; -pub const CURLOPT_PROXYAUTH: CURLoption = CURLOPTTYPE_LONG + 111; -pub const CURLOPT_FTP_RESPONSE_TIMEOUT: CURLoption = CURLOPTTYPE_LONG + 112; -pub const CURLOPT_IPRESOLVE: CURLoption = CURLOPTTYPE_LONG + 113; -pub const CURLOPT_MAXFILESIZE: CURLoption = CURLOPTTYPE_LONG + 114; -pub const CURLOPT_INFILESIZE_LARGE: CURLoption = CURLOPTTYPE_OFF_T + 115; -pub const CURLOPT_RESUME_FROM_LARGE: CURLoption = CURLOPTTYPE_OFF_T + 116; -pub const CURLOPT_MAXFILESIZE_LARGE: CURLoption = CURLOPTTYPE_OFF_T + 117; -pub const CURLOPT_NETRC_FILE: CURLoption = CURLOPTTYPE_OBJECTPOINT + 118; -pub const CURLOPT_USE_SSL: CURLoption = CURLOPTTYPE_LONG + 119; -pub const CURLOPT_POSTFIELDSIZE_LARGE: CURLoption = CURLOPTTYPE_OFF_T + 120; -pub const CURLOPT_TCP_NODELAY: CURLoption = CURLOPTTYPE_LONG + 121; -pub const CURLOPT_FTPSSLAUTH: CURLoption = CURLOPTTYPE_LONG + 129; -pub const CURLOPT_IOCTLFUNCTION: CURLoption = 
CURLOPTTYPE_FUNCTIONPOINT + 130; -pub const CURLOPT_IOCTLDATA: CURLoption = CURLOPTTYPE_OBJECTPOINT + 131; -pub const CURLOPT_FTP_ACCOUNT: CURLoption = CURLOPTTYPE_OBJECTPOINT + 134; -pub const CURLOPT_COOKIELIST: CURLoption = CURLOPTTYPE_OBJECTPOINT + 135; -pub const CURLOPT_IGNORE_CONTENT_LENGTH: CURLoption = CURLOPTTYPE_LONG + 136; -pub const CURLOPT_FTP_SKIP_PASV_IP: CURLoption = CURLOPTTYPE_LONG + 137; -pub const CURLOPT_FTP_FILEMETHOD: CURLoption = CURLOPTTYPE_LONG + 138; -pub const CURLOPT_LOCALPORT: CURLoption = CURLOPTTYPE_LONG + 139; -pub const CURLOPT_LOCALPORTRANGE: CURLoption = CURLOPTTYPE_LONG + 140; -pub const CURLOPT_CONNECT_ONLY: CURLoption = CURLOPTTYPE_LONG + 141; -pub const CURLOPT_CONV_FROM_NETWORK_FUNCTION: CURLoption = CURLOPTTYPE_FUNCTIONPOINT + 142; -pub const CURLOPT_CONV_TO_NETWORK_FUNCTION: CURLoption = CURLOPTTYPE_FUNCTIONPOINT + 143; -pub const CURLOPT_CONV_FROM_UTF8_FUNCTION: CURLoption = CURLOPTTYPE_FUNCTIONPOINT + 144; -pub const CURLOPT_MAX_SEND_SPEED_LARGE: CURLoption = CURLOPTTYPE_OFF_T + 145; -pub const CURLOPT_MAX_RECV_SPEED_LARGE: CURLoption = CURLOPTTYPE_OFF_T + 146; -pub const CURLOPT_FTP_ALTERNATIVE_TO_USER: CURLoption = CURLOPTTYPE_OBJECTPOINT + 147; -pub const CURLOPT_SOCKOPTFUNCTION: CURLoption = CURLOPTTYPE_FUNCTIONPOINT + 148; -pub const CURLOPT_SOCKOPTDATA: CURLoption = CURLOPTTYPE_OBJECTPOINT + 149; -pub const CURLOPT_SSL_SESSIONID_CACHE: CURLoption = CURLOPTTYPE_LONG + 150; -pub const CURLOPT_SSH_AUTH_TYPES: CURLoption = CURLOPTTYPE_LONG + 151; -pub const CURLOPT_SSH_PUBLIC_KEYFILE: CURLoption = CURLOPTTYPE_OBJECTPOINT + 152; -pub const CURLOPT_SSH_PRIVATE_KEYFILE: CURLoption = CURLOPTTYPE_OBJECTPOINT + 153; -pub const CURLOPT_FTP_SSL_CCC: CURLoption = CURLOPTTYPE_LONG + 154; -pub const CURLOPT_TIMEOUT_MS: CURLoption = CURLOPTTYPE_LONG + 155; -pub const CURLOPT_CONNECTTIMEOUT_MS: CURLoption = CURLOPTTYPE_LONG + 156; -pub const CURLOPT_HTTP_TRANSFER_DECODING: CURLoption = CURLOPTTYPE_LONG + 157; -pub const 
CURLOPT_HTTP_CONTENT_DECODING: CURLoption = CURLOPTTYPE_LONG + 158; -pub const CURLOPT_NEW_FILE_PERMS: CURLoption = CURLOPTTYPE_LONG + 159; -pub const CURLOPT_NEW_DIRECTORY_PERMS: CURLoption = CURLOPTTYPE_LONG + 160; -pub const CURLOPT_POSTREDIR: CURLoption = CURLOPTTYPE_LONG + 161; -pub const CURLOPT_SSH_HOST_PUBLIC_KEY_MD5: CURLoption = CURLOPTTYPE_OBJECTPOINT + 162; -pub const CURLOPT_OPENSOCKETFUNCTION: CURLoption = CURLOPTTYPE_FUNCTIONPOINT + 163; -pub const CURLOPT_OPENSOCKETDATA: CURLoption = CURLOPTTYPE_OBJECTPOINT + 164; -pub const CURLOPT_COPYPOSTFIELDS: CURLoption = CURLOPTTYPE_OBJECTPOINT + 165; -pub const CURLOPT_PROXY_TRANSFER_MODE: CURLoption = CURLOPTTYPE_LONG + 166; -pub const CURLOPT_SEEKFUNCTION: CURLoption = CURLOPTTYPE_FUNCTIONPOINT + 167; -pub const CURLOPT_SEEKDATA: CURLoption = CURLOPTTYPE_OBJECTPOINT + 168; -pub const CURLOPT_CRLFILE: CURLoption = CURLOPTTYPE_OBJECTPOINT + 169; -pub const CURLOPT_ISSUERCERT: CURLoption = CURLOPTTYPE_OBJECTPOINT + 170; -pub const CURLOPT_ADDRESS_SCOPE: CURLoption = CURLOPTTYPE_LONG + 171; -pub const CURLOPT_CERTINFO: CURLoption = CURLOPTTYPE_LONG + 172; -pub const CURLOPT_USERNAME: CURLoption = CURLOPTTYPE_OBJECTPOINT + 173; -pub const CURLOPT_PASSWORD: CURLoption = CURLOPTTYPE_OBJECTPOINT + 174; -pub const CURLOPT_PROXYUSERNAME: CURLoption = CURLOPTTYPE_OBJECTPOINT + 175; -pub const CURLOPT_PROXYPASSWORD: CURLoption = CURLOPTTYPE_OBJECTPOINT + 176; -pub const CURLOPT_NOPROXY: CURLoption = CURLOPTTYPE_OBJECTPOINT + 177; -pub const CURLOPT_TFTP_BLKSIZE: CURLoption = CURLOPTTYPE_LONG + 178; -pub const CURLOPT_SOCKS5_GSSAPI_SERVICE: CURLoption = CURLOPTTYPE_OBJECTPOINT + 179; -pub const CURLOPT_SOCKS5_GSSAPI_NEC: CURLoption = CURLOPTTYPE_LONG + 180; -pub const CURLOPT_PROTOCOLS: CURLoption = CURLOPTTYPE_LONG + 181; -pub const CURLOPT_REDIR_PROTOCOLS: CURLoption = CURLOPTTYPE_LONG + 182; -pub const CURLOPT_SSH_KNOWNHOSTS: CURLoption = CURLOPTTYPE_OBJECTPOINT + 183; -pub const CURLOPT_SSH_KEYFUNCTION: CURLoption 
= CURLOPTTYPE_FUNCTIONPOINT + 184; -pub const CURLOPT_SSH_KEYDATA: CURLoption = CURLOPTTYPE_OBJECTPOINT + 185; -pub const CURLOPT_MAIL_FROM: CURLoption = CURLOPTTYPE_OBJECTPOINT + 186; -pub const CURLOPT_MAIL_RCPT: CURLoption = CURLOPTTYPE_OBJECTPOINT + 187; -pub const CURLOPT_FTP_USE_PRET: CURLoption = CURLOPTTYPE_LONG + 188; -pub const CURLOPT_RTSP_REQUEST: CURLoption = CURLOPTTYPE_LONG + 189; -pub const CURLOPT_RTSP_SESSION_ID: CURLoption = CURLOPTTYPE_OBJECTPOINT + 190; -pub const CURLOPT_RTSP_STREAM_URI: CURLoption = CURLOPTTYPE_OBJECTPOINT + 191; -pub const CURLOPT_RTSP_TRANSPORT: CURLoption = CURLOPTTYPE_OBJECTPOINT + 192; -pub const CURLOPT_RTSP_CLIENT_CSEQ: CURLoption = CURLOPTTYPE_LONG + 193; -pub const CURLOPT_RTSP_SERVER_CSEQ: CURLoption = CURLOPTTYPE_LONG + 194; -pub const CURLOPT_INTERLEAVEDATA: CURLoption = CURLOPTTYPE_OBJECTPOINT + 195; -pub const CURLOPT_INTERLEAVEFUNCTION: CURLoption = CURLOPTTYPE_FUNCTIONPOINT + 196; -pub const CURLOPT_WILDCARDMATCH: CURLoption = CURLOPTTYPE_LONG + 197; -pub const CURLOPT_CHUNK_BGN_FUNCTION: CURLoption = CURLOPTTYPE_FUNCTIONPOINT + 198; -pub const CURLOPT_CHUNK_END_FUNCTION: CURLoption = CURLOPTTYPE_FUNCTIONPOINT + 199; -pub const CURLOPT_FNMATCH_FUNCTION: CURLoption = CURLOPTTYPE_FUNCTIONPOINT + 200; -pub const CURLOPT_CHUNK_DATA: CURLoption = CURLOPTTYPE_OBJECTPOINT + 201; -pub const CURLOPT_FNMATCH_DATA: CURLoption = CURLOPTTYPE_OBJECTPOINT + 202; -pub const CURLOPT_RESOLVE: CURLoption = CURLOPTTYPE_OBJECTPOINT + 203; -pub const CURLOPT_TLSAUTH_USERNAME: CURLoption = CURLOPTTYPE_OBJECTPOINT + 204; -pub const CURLOPT_TLSAUTH_PASSWORD: CURLoption = CURLOPTTYPE_OBJECTPOINT + 205; -pub const CURLOPT_TLSAUTH_TYPE: CURLoption = CURLOPTTYPE_OBJECTPOINT + 206; -pub const CURLOPT_TRANSFER_ENCODING: CURLoption = CURLOPTTYPE_LONG + 207; -pub const CURLOPT_CLOSESOCKETFUNCTION: CURLoption = CURLOPTTYPE_FUNCTIONPOINT + 208; -pub const CURLOPT_CLOSESOCKETDATA: CURLoption = CURLOPTTYPE_OBJECTPOINT + 209; -pub const 
CURLOPT_GSSAPI_DELEGATION: CURLoption = CURLOPTTYPE_LONG + 210; -// pub const CURLOPT_DNS_SERVERS: CURLoption = CURLOPTTYPE_OBJECTPOINT + 211; -// pub const CURLOPT_ACCEPTTIMEOUT_MS: CURLoption = CURLOPTTYPE_LONG + 212; -// pub const CURLOPT_TCP_KEEPALIVE: CURLoption = CURLOPTTYPE_LONG + 213; -// pub const CURLOPT_TCP_KEEPIDLE: CURLoption = CURLOPTTYPE_LONG + 214; -// pub const CURLOPT_TCP_KEEPINTVL: CURLoption = CURLOPTTYPE_LONG + 215; -// pub const CURLOPT_SSL_OPTIONS: CURLoption = CURLOPTTYPE_LONG + 216; -// pub const CURLOPT_MAIL_AUTH: CURLoption = CURLOPTTYPE_OBJECTPOINT + 217; -// pub const CURLOPT_SASL_IR: CURLoption = CURLOPTTYPE_LONG + 218; -// pub const CURLOPT_XFERINFOFUNCTION: CURLoption = CURLOPTTYPE_FUNCTIONPOINT + 219; -// pub const CURLOPT_XOAUTH2_BEARER: CURLoption = CURLOPTTYPE_OBJECTPOINT + 220; -// pub const CURLOPT_DNS_INTERFACE: CURLoption = CURLOPTTYPE_OBJECTPOINT + 221; -// pub const CURLOPT_DNS_LOCAL_IP4: CURLoption = CURLOPTTYPE_OBJECTPOINT + 222; -// pub const CURLOPT_DNS_LOCAL_IP6: CURLoption = CURLOPTTYPE_OBJECTPOINT + 223; -// pub const CURLOPT_LOGIN_OPTIONS: CURLoption = CURLOPTTYPE_OBJECTPOINT + 224; - -pub const CURL_IPRESOLVE_WHATEVER: c_int = 0; -pub const CURL_IPRESOLVE_V4: c_int = 1; -pub const CURL_IPRESOLVE_V6: c_int = 2; - -// Note that the type here is wrong, it's just intended to just be an enum. 
-pub const CURL_SSLVERSION_DEFAULT: CURLoption = 0; -pub const CURL_SSLVERSION_TLSv1: CURLoption = 1; -pub const CURL_SSLVERSION_SSLv2: CURLoption = 2; -pub const CURL_SSLVERSION_SSLv3: CURLoption = 3; -// pub const CURL_SSLVERSION_TLSv1_0: CURLoption = 4; -// pub const CURL_SSLVERSION_TLSv1_1: CURLoption = 5; -// pub const CURL_SSLVERSION_TLSv1_2: CURLoption = 6; - -pub const CURLOPT_READDATA: CURLoption = CURLOPT_INFILE; -pub const CURLOPT_WRITEDATA: CURLoption = CURLOPT_FILE; -pub const CURLOPT_HEADERDATA: CURLoption = CURLOPT_WRITEHEADER; - -pub type curl_TimeCond = __enum_ty; -pub const CURL_TIMECOND_NONE: curl_TimeCond = 0; -pub const CURL_TIMECOND_IFMODSINCE: curl_TimeCond = 1; -pub const CURL_TIMECOND_IFUNMODSINCE: curl_TimeCond = 2; -pub const CURL_TIMECOND_LASTMOD: curl_TimeCond = 3; - -pub type CURLformoption = __enum_ty; -pub const CURLFORM_NOTHING: CURLformoption = 0; -pub const CURLFORM_COPYNAME: CURLformoption = 1; -pub const CURLFORM_PTRNAME: CURLformoption = 2; -pub const CURLFORM_NAMELENGTH: CURLformoption = 3; -pub const CURLFORM_COPYCONTENTS: CURLformoption = 4; -pub const CURLFORM_PTRCONTENTS: CURLformoption = 5; -pub const CURLFORM_CONTENTSLENGTH: CURLformoption = 6; -pub const CURLFORM_FILECONTENT: CURLformoption = 7; -pub const CURLFORM_ARRAY: CURLformoption = 8; -pub const CURLFORM_OBSOLETE: CURLformoption = 9; -pub const CURLFORM_FILE: CURLformoption = 10; -pub const CURLFORM_BUFFER: CURLformoption = 11; -pub const CURLFORM_BUFFERPTR: CURLformoption = 12; -pub const CURLFORM_BUFFERLENGTH: CURLformoption = 13; -pub const CURLFORM_CONTENTTYPE: CURLformoption = 14; -pub const CURLFORM_CONTENTHEADER: CURLformoption = 15; -pub const CURLFORM_FILENAME: CURLformoption = 16; -pub const CURLFORM_END: CURLformoption = 17; -pub const CURLFORM_STREAM: CURLformoption = 19; - -pub type CURLFORMcode = __enum_ty; -pub const CURL_FORMADD_OK: CURLFORMcode = 0; -pub const CURL_FORMADD_MEMORY: CURLFORMcode = 1; -pub const CURL_FORMADD_OPTION_TWICE: 
CURLFORMcode = 2; -pub const CURL_FORMADD_NULL: CURLFORMcode = 3; -pub const CURL_FORMADD_UNKNOWN_OPTION: CURLFORMcode = 4; -pub const CURL_FORMADD_INCOMPLETE: CURLFORMcode = 5; -pub const CURL_FORMADD_ILLEGAL_ARRAY: CURLFORMcode = 6; -pub const CURL_FORMADD_DISABLED: CURLFORMcode = 7; - -#[repr(C)] -pub struct curl_forms { - pub option: CURLformoption, - pub value: *const c_char, -} - -pub type curl_formget_callback = extern fn(*mut c_void, - *const c_char, - size_t) -> size_t; - -#[repr(C)] -pub struct curl_slist { - pub data: *mut c_char, - pub next: *mut curl_slist, -} - -#[repr(C)] -pub struct curl_certinfo { - pub num_of_certs: c_int, - pub certinfo: *mut *mut curl_slist, -} - -// pub type curl_sslbackend = __enum_ty; -// pub const CURLSSLBACKEND_NONE: curl_sslbackend = 0; -// pub const CURLSSLBACKEND_OPENSSL: curl_sslbackend = 1; -// pub const CURLSSLBACKEND_GNUTLS: curl_sslbackend = 2; -// pub const CURLSSLBACKEND_NSS: curl_sslbackend = 3; -// pub const CURLSSLBACKEND_QSOSSL: curl_sslbackend = 4; -// pub const CURLSSLBACKEND_GSKIT: curl_sslbackend = 5; -// pub const CURLSSLBACKEND_POLARSSL: curl_sslbackend = 6; -// pub const CURLSSLBACKEND_CYASSL: curl_sslbackend = 7; -// pub const CURLSSLBACKEND_SCHANNEL: curl_sslbackend = 8; -// pub const CURLSSLBACKEND_DARWINSSL: curl_sslbackend = 9; - -// #[repr(C)] -// pub struct curl_tlssessioninfo { -// pub backend: curl_sslbackend, -// pub internals: *mut c_void, -// } - -pub const CURLINFO_STRING: CURLINFO = 0x100000; -pub const CURLINFO_LONG: CURLINFO = 0x200000; -pub const CURLINFO_DOUBLE: CURLINFO = 0x300000; -pub const CURLINFO_SLIST: CURLINFO = 0x400000; -pub const CURLINFO_MASK: CURLINFO = 0x0fffff; -pub const CURLINFO_TYPEMASK: CURLINFO = 0xf00000; - -pub const CURLINFO_EFFECTIVE_URL: CURLINFO = CURLINFO_STRING + 1; -pub const CURLINFO_RESPONSE_CODE: CURLINFO = CURLINFO_LONG + 2; -pub const CURLINFO_TOTAL_TIME: CURLINFO = CURLINFO_DOUBLE + 3; -pub const CURLINFO_NAMELOOKUP_TIME: CURLINFO = CURLINFO_DOUBLE + 
4; -pub const CURLINFO_CONNECT_TIME: CURLINFO = CURLINFO_DOUBLE + 5; -pub const CURLINFO_PRETRANSFER_TIME: CURLINFO = CURLINFO_DOUBLE + 6; -pub const CURLINFO_SIZE_UPLOAD: CURLINFO = CURLINFO_DOUBLE + 7; -pub const CURLINFO_SIZE_DOWNLOAD: CURLINFO = CURLINFO_DOUBLE + 8; -pub const CURLINFO_SPEED_DOWNLOAD: CURLINFO = CURLINFO_DOUBLE + 9; -pub const CURLINFO_SPEED_UPLOAD: CURLINFO = CURLINFO_DOUBLE + 10; -pub const CURLINFO_HEADER_SIZE: CURLINFO = CURLINFO_LONG + 11; -pub const CURLINFO_REQUEST_SIZE: CURLINFO = CURLINFO_LONG + 12; -pub const CURLINFO_SSL_VERIFYRESULT: CURLINFO = CURLINFO_LONG + 13; -pub const CURLINFO_FILETIME: CURLINFO = CURLINFO_LONG + 14; -pub const CURLINFO_CONTENT_LENGTH_DOWNLOAD: CURLINFO = CURLINFO_DOUBLE + 15; -pub const CURLINFO_CONTENT_LENGTH_UPLOAD: CURLINFO = CURLINFO_DOUBLE + 16; -pub const CURLINFO_STARTTRANSFER_TIME: CURLINFO = CURLINFO_DOUBLE + 17; -pub const CURLINFO_CONTENT_TYPE: CURLINFO = CURLINFO_STRING + 18; -pub const CURLINFO_REDIRECT_TIME: CURLINFO = CURLINFO_DOUBLE + 19; -pub const CURLINFO_REDIRECT_COUNT: CURLINFO = CURLINFO_LONG + 20; -pub const CURLINFO_PRIVATE: CURLINFO = CURLINFO_STRING + 21; -pub const CURLINFO_HTTP_CONNECTCODE: CURLINFO = CURLINFO_LONG + 22; -pub const CURLINFO_HTTPAUTH_AVAIL: CURLINFO = CURLINFO_LONG + 23; -pub const CURLINFO_PROXYAUTH_AVAIL: CURLINFO = CURLINFO_LONG + 24; -pub const CURLINFO_OS_ERRNO: CURLINFO = CURLINFO_LONG + 25; -pub const CURLINFO_NUM_CONNECTS: CURLINFO = CURLINFO_LONG + 26; -pub const CURLINFO_SSL_ENGINES: CURLINFO = CURLINFO_SLIST + 27; -pub const CURLINFO_COOKIELIST: CURLINFO = CURLINFO_SLIST + 28; -pub const CURLINFO_LASTSOCKET: CURLINFO = CURLINFO_LONG + 29; -pub const CURLINFO_FTP_ENTRY_PATH: CURLINFO = CURLINFO_STRING + 30; -pub const CURLINFO_REDIRECT_URL: CURLINFO = CURLINFO_STRING + 31; -pub const CURLINFO_PRIMARY_IP: CURLINFO = CURLINFO_STRING + 32; -pub const CURLINFO_APPCONNECT_TIME: CURLINFO = CURLINFO_DOUBLE + 33; -pub const CURLINFO_CERTINFO: CURLINFO = 
CURLINFO_SLIST + 34; -pub const CURLINFO_CONDITION_UNMET: CURLINFO = CURLINFO_LONG + 35; -pub const CURLINFO_RTSP_SESSION_ID: CURLINFO = CURLINFO_STRING + 36; -pub const CURLINFO_RTSP_CLIENT_CSEQ: CURLINFO = CURLINFO_LONG + 37; -pub const CURLINFO_RTSP_SERVER_CSEQ: CURLINFO = CURLINFO_LONG + 38; -pub const CURLINFO_RTSP_CSEQ_RECV: CURLINFO = CURLINFO_LONG + 39; -pub const CURLINFO_PRIMARY_PORT: CURLINFO = CURLINFO_LONG + 40; -pub const CURLINFO_LOCAL_IP: CURLINFO = CURLINFO_STRING + 41; -pub const CURLINFO_LOCAL_PORT: CURLINFO = CURLINFO_LONG + 42; -// pub const CURLINFO_TLS_SESSION: CURLINFO = CURLINFO_SLIST + 43; - -pub type curl_closepolicy = __enum_ty; -pub const CURLCLOSEPOLICY_NONE: curl_closepolicy = 0; -pub const CURLCLOSEPOLICY_OLDEST: curl_closepolicy = 1; -pub const CURLCLOSEPOLICY_LEAST_RECENTLY_USED: curl_closepolicy = 2; -pub const CURLCLOSEPOLICY_LEAST_TRAFFIC: curl_closepolicy = 3; -pub const CURLCLOSEPOLICY_SLOWEST: curl_closepolicy = 4; -pub const CURLCLOSEPOLICY_CALLBACK: curl_closepolicy = 5; - -pub const CURL_GLOBAL_SSL: c_long = 1 << 0; -pub const CURL_GLOBAL_WIN32: c_long = 1 << 1; -pub const CURL_GLOBAL_ALL: c_long = CURL_GLOBAL_SSL | CURL_GLOBAL_WIN32; -pub const CURL_GLOBAL_NOTHING: c_long = 0; -pub const CURL_GLOBAL_DEFAULT: c_long = CURL_GLOBAL_ALL; -// pub const CURL_GLOBAL_ACK_EINTR: c_long = 1 << 2; - -pub type curl_lock_data = __enum_ty; -pub const CURL_LOCK_DATA_NONE: curl_lock_data = 0; -pub const CURL_LOCK_DATA_SHARE: curl_lock_data = 1; -pub const CURL_LOCK_DATA_COOKIE: curl_lock_data = 2; -pub const CURL_LOCK_DATA_DNS: curl_lock_data = 3; -pub const CURL_LOCK_DATA_SSL_SESSION: curl_lock_data = 4; -pub const CURL_LOCK_DATA_CONNECT: curl_lock_data = 5; - -pub type curl_lock_access = __enum_ty; -pub const CURL_LOCK_ACCESS_NONE: curl_lock_access = 0; -pub const CURL_LOCK_ACCESS_SHARED: curl_lock_access = 1; -pub const CURL_LOCK_ACCESS_SINGLE: curl_lock_access = 2; - -pub type curl_lock_function = extern fn(*mut CURL, - 
curl_lock_data, - curl_lock_access, - *mut c_void); -pub type curl_unlock_function = extern fn(*mut CURL, - curl_lock_data, - *mut c_void); - -pub enum CURLSH {} - -pub type CURLSHcode = __enum_ty; -pub const CURLSHE_OK: CURLSHcode = 0; -pub const CURLSHE_BAD_OPTION: CURLSHcode = 1; -pub const CURLSHE_IN_USE: CURLSHcode = 2; -pub const CURLSHE_INVALID: CURLSHcode = 3; -pub const CURLSHE_NOMEM: CURLSHcode = 4; -// pub const CURLSHE_NOT_BUILT_IN: CURLSHcode = 5; - -pub type CURLSHoption = __enum_ty; -pub const CURLSHOPT_NONE: CURLSHoption = 0; -pub const CURLSHOPT_SHARE: CURLSHoption = 1; -pub const CURLSHOPT_UNSHARE: CURLSHoption = 2; -pub const CURLSHOPT_LOCKFUNC: CURLSHoption = 3; -pub const CURLSHOPT_UNLOCKFUNC: CURLSHoption = 4; -pub const CURLSHOPT_USERDATA: CURLSHoption = 5; - -pub const CURLVERSION_FIRST: CURLversion = 0; -pub const CURLVERSION_SECOND: CURLversion = 1; -pub const CURLVERSION_THIRD: CURLversion = 2; -pub const CURLVERSION_FOURTH: CURLversion = 3; - -#[repr(C)] -pub struct curl_version_info_data { - pub age: CURLversion, - pub version: *const c_char, - pub version_num: c_uint, - pub host: *const c_char, - pub features: c_int, - pub ssl_version: *const c_char, - pub ssl_version_num: c_long, - pub libz_version: *const c_char, - pub protocols: *const *const c_char, - pub ares: *const c_char, - pub ares_num: c_int, - pub libidn: *const c_char, - pub iconv_ver_num: c_int, - pub libssh_version: *const c_char, -} - -pub const CURL_VERSION_IPV6: c_int = 1 << 0; -pub const CURL_VERSION_KERBEROS4: c_int = 1 << 1; -pub const CURL_VERSION_SSL: c_int = 1 << 2; -pub const CURL_VERSION_LIBZ: c_int = 1 << 3; -pub const CURL_VERSION_NTLM: c_int = 1 << 4; -pub const CURL_VERSION_GSSNEGOTIATE: c_int = 1 << 5; -pub const CURL_VERSION_DEBUG: c_int = 1 << 6; -pub const CURL_VERSION_ASYNCHDNS: c_int = 1 << 7; -pub const CURL_VERSION_SPNEGO: c_int = 1 << 8; -pub const CURL_VERSION_LARGEFILE: c_int = 1 << 9; -pub const CURL_VERSION_IDN: c_int = 1 << 10; -pub const 
CURL_VERSION_SSPI: c_int = 1 << 11; -pub const CURL_VERSION_CONV: c_int = 1 << 12; -pub const CURL_VERSION_CURLDEBUG: c_int = 1 << 13; -pub const CURL_VERSION_TLSAUTH_SRP: c_int = 1 << 14; -pub const CURL_VERSION_NTLM_WB: c_int = 1 << 15; -// pub const CURL_VERSION_HTTP2: c_int = 1 << 16; - -pub const CURLPAUSE_RECV: c_int = 1 << 0; -pub const CURLPAUSE_RECV_CONT: c_int = 0; -pub const CURLPAUSE_SEND: c_int = 1 << 2; -pub const CURLPAUSE_SEND_CONT: c_int = 0; - -pub enum CURLM {} - -pub type CURLMcode = c_int; -pub const CURLM_CALL_MULTI_PERFORM: CURLMcode = -1; -pub const CURLM_OK: CURLMcode = 0; -pub const CURLM_BAD_HANDLE: CURLMcode = 1; -pub const CURLM_BAD_EASY_HANDLE: CURLMcode = 2; -pub const CURLM_OUT_OF_MEMORY: CURLMcode = 3; -pub const CURLM_INTERNAL_ERROR: CURLMcode = 4; -pub const CURLM_BAD_SOCKET: CURLMcode = 5; -pub const CURLM_UNKNOWN_OPTION: CURLMcode = 6; -// pub const CURLM_ADDED_ALREADY: CURLMcode = 7; - -pub type CURLMSG = __enum_ty; -pub const CURLMSG_NONE: CURLMSG = 0; -pub const CURLMSG_DONE: CURLMSG = 1; - -#[repr(C)] -pub struct CURLMsg { - pub msg: CURLMSG, - pub easy_handle: *mut CURL, - pub data: *mut c_void, -} - -// pub const CURL_WAIT_POLLIN: c_short = 0x1; -// pub const CURL_WAIT_POLLPRI: c_short = 0x2; -// pub const CURL_WAIT_POLLOUT: c_short = 0x4; - -// #[repr(C)] -// pub struct curl_waitfd { -// pub fd: curl_socket_t, -// pub events: c_short, -// pub revents: c_short, -// } - -pub const CURL_POLL_NONE: c_int = 0; -pub const CURL_POLL_IN: c_int = 1; -pub const CURL_POLL_OUT: c_int = 2; -pub const CURL_POLL_INOUT: c_int = 3; -pub const CURL_POLL_REMOVE: c_int = 4; -pub const CURL_CSELECT_IN: c_int = 1; -pub const CURL_CSELECT_OUT: c_int = 2; -pub const CURL_CSELECT_ERR: c_int = 4; -pub const CURL_SOCKET_TIMEOUT: curl_socket_t = CURL_SOCKET_BAD; - -pub type curl_socket_callback = extern fn(*mut CURL, - curl_socket_t, - c_int, - *mut c_void, - *mut c_void) -> c_int; -pub type curl_multi_timer_callback = extern fn(*mut CURLM, - 
c_long, - *mut c_void) -> c_int; - -pub type CURLMoption = __enum_ty; -pub const CURLMOPT_SOCKETFUNCTION: CURLMoption = CURLOPTTYPE_FUNCTIONPOINT + 1; -pub const CURLMOPT_SOCKETDATA: CURLMoption = CURLOPTTYPE_OBJECTPOINT + 2; -pub const CURLMOPT_PIPELINING: CURLMoption = CURLOPTTYPE_LONG + 3; -pub const CURLMOPT_TIMERFUNCTION: CURLMoption = CURLOPTTYPE_FUNCTIONPOINT + 4; -pub const CURLMOPT_TIMERDATA: CURLMoption = CURLOPTTYPE_OBJECTPOINT + 5; -// pub const CURLMOPT_MAXCONNECTS: CURLMoption = CURLOPTTYPE_LONG + 6; -// pub const CURLMOPT_MAX_HOST_CONNECTIONS: CURLMoption = CURLOPTTYPE_LONG + 7; -// pub const CURLMOPT_MAX_PIPELINE_LENGTH: CURLMoption = CURLOPTTYPE_LONG + 8; -// pub const CURLMOPT_CONTENT_LENGTH_PENALTY_SIZE: CURLMoption = CURLOPTTYPE_OFF_T + 9; -// pub const CURLMOPT_CHUNK_LENGTH_PENALTY_SIZE: CURLMoption = CURLOPTTYPE_OFF_T + 10; -// pub const CURLMOPT_PIPELINING_SITE_BL: CURLMoption = CURLOPTTYPE_OBJECTPOINT + 11; -// pub const CURLMOPT_PIPELINING_SERVER_BL: CURLMoption = CURLOPTTYPE_OBJECTPOINT + 12; -// pub const CURLMOPT_MAX_TOTAL_CONNECTIONS: CURLMoption = CURLOPTTYPE_LONG + 13; - -pub const CURL_ERROR_SIZE: usize = 256; - -extern { - pub fn curl_formadd(httppost: *mut *mut curl_httppost, - last_post: *mut *mut curl_httppost, - ...) 
-> CURLFORMcode; - pub fn curl_formget(form: *mut curl_httppost, - arg: *mut c_void, - append: curl_formget_callback) -> c_int; - pub fn curl_formfree(form: *mut curl_httppost); - - pub fn curl_version() -> *mut c_char; - - pub fn curl_easy_escape(handle: *mut CURL, - string: *const c_char, - length: c_int) -> *mut c_char; - pub fn curl_easy_unescape(handle: *mut CURL, - string: *const c_char, - length: c_int, - outlength: *mut c_int) -> *mut c_char; - pub fn curl_free(p: *mut c_void); - - pub fn curl_global_init(flags: c_long) -> CURLcode; - pub fn curl_global_init_mem(flags: c_long, - m: curl_malloc_callback, - f: curl_free_callback, - r: curl_realloc_callback, - s: curl_strdup_callback, - c: curl_calloc_callback) -> CURLcode; - pub fn curl_global_cleanup(); - - pub fn curl_slist_append(list: *mut curl_slist, - val: *const c_char) -> *mut curl_slist; - pub fn curl_slist_free_all(list: *mut curl_slist); - - pub fn curl_getdate(p: *const c_char, _: *const time_t) -> time_t; - - pub fn curl_share_init() -> *mut CURLSH; - pub fn curl_share_setopt(sh: *mut CURLSH, - opt: CURLSHoption, - ...) -> CURLSHcode; - pub fn curl_share_cleanup(sh: *mut CURLSH) -> CURLSHcode; - - pub fn curl_version_info(t: CURLversion) -> *mut curl_version_info_data; - - pub fn curl_easy_strerror(code: CURLcode) -> *const c_char; - pub fn curl_share_strerror(code: CURLSHcode) -> *const c_char; - pub fn curl_easy_pause(handle: *mut CURL, bitmask: c_int) -> CURLcode; - - pub fn curl_easy_init() -> *mut CURL; - pub fn curl_easy_setopt(curl: *mut CURL, option: CURLoption, ...) -> CURLcode; - pub fn curl_easy_perform(curl: *mut CURL) -> CURLcode; - pub fn curl_easy_cleanup(curl: *mut CURL); - pub fn curl_easy_getinfo(curl: *mut CURL, info: CURLINFO, ...) 
-> CURLcode; - pub fn curl_easy_duphandle(curl: *mut CURL) -> *mut CURL; - pub fn curl_easy_reset(curl: *mut CURL); - pub fn curl_easy_recv(curl: *mut CURL, - buffer: *mut c_void, - buflen: size_t, - n: *mut size_t) -> CURLcode; - pub fn curl_easy_send(curl: *mut CURL, - buffer: *const c_void, - buflen: size_t, - n: *mut size_t) -> CURLcode; - - pub fn curl_multi_init() -> *mut CURLM; - pub fn curl_multi_add_handle(multi_handle: *mut CURLM, - curl_handle: *mut CURL) -> CURLMcode; - pub fn curl_multi_remove_handle(multi_handle: *mut CURLM, - curl_handle: *mut CURL) -> CURLMcode; - pub fn curl_multi_fdset(multi_handle: *mut CURLM, - read_fd_set: *mut fd_set, - write_fd_set: *mut fd_set, - exc_fd_set: *mut fd_set, - max_fd: *mut c_int) -> CURLMcode; - // pub fn curl_multi_wait(multi_handle: *mut CURLM, - // extra_fds: *mut curl_waitfd, - // extra_nfds: c_uint, - // timeout_ms: c_int, - // ret: *mut c_int) -> CURLMcode; - pub fn curl_multi_perform(multi_handle: *mut CURLM, - running_handles: *mut c_int) -> CURLMcode; - pub fn curl_multi_cleanup(multi_handle: *mut CURLM) -> CURLMcode; - pub fn curl_multi_info_read(multi_handle: *mut CURLM, - msgs_in_queue: *mut c_int) -> *mut CURLMsg; - pub fn curl_multi_strerror(code: CURLMcode) -> *const c_char; - pub fn curl_multi_socket(multi_handle: *mut CURLM, - s: curl_socket_t, - running_handles: *mut c_int) -> CURLMcode; - pub fn curl_multi_socket_action(multi_handle: *mut CURLM, - s: curl_socket_t, - ev_bitmask: c_int, - running_handles: *mut c_int) -> CURLMcode; - pub fn curl_multi_socket_all(multi_handle: *mut CURLM, - running_handles: *mut c_int) -> CURLMcode; - pub fn curl_multi_timeout(multi_handle: *mut CURLM, - milliseconds: *mut c_long) -> CURLMcode; - pub fn curl_multi_setopt(multi_handle: *mut CURLM, - option: CURLMoption, - ...) 
-> CURLMcode; - pub fn curl_multi_assign(multi_handle: *mut CURLM, - sockfd: curl_socket_t, - sockp: *mut c_void) -> CURLMcode; -} diff -Nru cargo-0.17.0/vendor/docopt-0.6.86/.cargo-checksum.json cargo-0.19.0/vendor/docopt-0.6.86/.cargo-checksum.json --- cargo-0.17.0/vendor/docopt-0.6.86/.cargo-checksum.json 2017-03-24 16:59:54.000000000 +0000 +++ cargo-0.19.0/vendor/docopt-0.6.86/.cargo-checksum.json 1970-01-01 00:00:00.000000000 +0000 @@ -1 +0,0 @@ -{"files":{".cargo-ok":"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",".gitignore":"4c1232061d048af6e883c43903fb9a75d8596560336ffd98ead534ef832d3ad7",".travis.yml":"f31fa6b59b174d595b976c502f7f75c9d72868c27adb24af6bdc18a6b1898be3","COPYING":"01c266bced4a434da0051174d6bee16a4c82cf634e2679b6155d40d75012390f","Cargo.toml":"f49a451f8deb9e7a92ec02f9b9213092c3a14accd6d0a4206fecc413ef1c252a","LICENSE-MIT":"0f96a83840e146e43c0ec96a22ec1f392e0680e6c1226e6f3ba87e0740af850f","Makefile":"db1787c5c7d2daea87d92c0549976a18bbe0601acb2ab5bd8dc5edb9f2b46e63","README.md":"ab86cce5636418ce23068c2a2377fd8645f5446554f2464e8ddab364345b0735","UNLICENSE":"7e12e5df4bae12cb21581ba157ced20e1986a0508dd10d0e8a4ab9a4cf94e85c","completions/docopt-wordlist.bash":"213bf1baea244eeb32af3a24a9ad895212cb538e3cdaee3bfed842b11a2a64d8","ctags.rust":"3d128d3cc59f702e68953ba2fe6c3f46bc6991fc575308db060482d5da0c79f3","examples/cargo.rs":"7f4b8a06244f8e82b94bb659c0346a258499b73aa5c76120261c7ebbfaaa3eb0","examples/cp.rs":"4820c6be6d60714927c8cfd9707b60ebe60b90a0df11cc71f342f4578352f366","examples/decode.rs":"2e5d713f977b7626015fc6e7049039a4f727379b8a5055a40c8d0871983a3044","examples/hashmap.rs":"9066a7b7192e15b3b667702519645d31926a371bc54ab8d70b211d98458d5a8d","examples/optional_command.rs":"eefe58a4ea192bc87262743785be40e9d38de2185b0d7fa8e3a9ac2386cb3d64","examples/verbose_multiple.rs":"47240d86b0a6e69b8156f59dfced062dad1cc418e9de50be85bd8c7ed036cfb9","scripts/mk-testcases":"649f37d391650175c8462171f7a98fce81735c9317630a5eb13db532ddb22976","ses
sion.vim":"1d51566b00f8ff2021d56948c1c55f123959f3e24879a6ad9337eccb11fc8fe9","src/dopt.rs":"e82f18b1963ee5e65a74b076361b1ce93b194937295dcfa6d4620694dec8f1b4","src/lib.rs":"853762c3b565b3d36b9e8d10c35cf04babb2b122c66ee8949e38ae15b04e1982","src/parse.rs":"8d570502802b5ecb066f25c41e07f5d7da26de60e0cda846e1dd417124e16707","src/synonym.rs":"5eeec443a7df8549a31c1510fbeaeadde577d899cd651578cd7145835656b4a6","src/test/mod.rs":"38096547cc5cd6a02008e61e82a2ebfd08c809ca3b5054e17f4c1a3455dc7ccc","src/test/suggestions.rs":"51e044db856a424ef12d2bc2eb541ae922b93d81ac5548767c9c638ccd87d388","src/test/testcases.docopt":"13fcd2948a5625b76f93b98ac7b6cb53ef70c119fc2c5f85d2cb67e56bd4e9c3","src/test/testcases.rs":"cbecfab0c82249a7d8ad193ad5e9e10f45a7a41b37e69cfc025a9cdc6c213f04","src/wordlist.rs":"0996af88786fdbb6e2212f879dc6c290286d207b631bb88fad4af81450f98585"},"package":"4a7ef30445607f6fc8720f0a0a2c7442284b629cf0d049286860fae23e71c4d9"} \ No newline at end of file diff -Nru cargo-0.17.0/vendor/docopt-0.6.86/Cargo.toml cargo-0.19.0/vendor/docopt-0.6.86/Cargo.toml --- cargo-0.17.0/vendor/docopt-0.6.86/Cargo.toml 2017-03-24 16:59:54.000000000 +0000 +++ cargo-0.19.0/vendor/docopt-0.6.86/Cargo.toml 1970-01-01 00:00:00.000000000 +0000 @@ -1,26 +0,0 @@ -[package] -name = "docopt" -version = "0.6.86" #:version -authors = ["Andrew Gallant "] -description = "Command line argument parsing." 
-documentation = "http://burntsushi.net/rustdoc/docopt/" -homepage = "https://github.com/docopt/docopt.rs" -repository = "https://github.com/docopt/docopt.rs" -readme = "README.md" -keywords = ["docopt", "argument", "command", "argv"] -license = "Unlicense/MIT" - -[lib] -name = "docopt" - -[[bin]] -name = "docopt-wordlist" -path = "src/wordlist.rs" -doc = false -test = false - -[dependencies] -lazy_static = "0.2" -regex = "0.1" -rustc-serialize = "0.3" -strsim = "0.5" diff -Nru cargo-0.17.0/vendor/docopt-0.6.86/completions/docopt-wordlist.bash cargo-0.19.0/vendor/docopt-0.6.86/completions/docopt-wordlist.bash --- cargo-0.17.0/vendor/docopt-0.6.86/completions/docopt-wordlist.bash 2017-03-24 16:59:54.000000000 +0000 +++ cargo-0.19.0/vendor/docopt-0.6.86/completions/docopt-wordlist.bash 1970-01-01 00:00:00.000000000 +0000 @@ -1,79 +0,0 @@ -# This is your basic tab completion that will work well with commands that -# have only one usage (i.e., no distinct sub-commands). -# -# Completion works by simply taking the command name and running `$cmd --help` -# to get the usage (which is then parsed for possible completions). -function _docopt_wordlist { - if [ -z "$DOCOPT_WORDLIST_BIN" ]; then - DOCOPT_WORDLIST_BIN=/usr/local/bin/docopt-wordlist - fi - - cword=$(_get_cword) - cmd="${COMP_WORDS[0]}" - wordlist=$("$cmd" --help 2>&1 | "$DOCOPT_WORDLIST_BIN") - gen "$cword" "$wordlist" -} - -# This is a fancier version of the above that supports commands that have -# multiple sub-commands (i.e., distinct usages like Cargo). -# -# This supports sub-command completion only if `$cmd --list` shows a list of -# available sub-commands. -# -# Otherwise, the usage for the command `a b c d` is taken from the first -# command that exits successfully: -# -# a b c d --help -# a b c --help -# a b --help -# a --help -# -# So for example, if you've typed `cargo test --jo`, then the following -# happens: -# -# cargo test --jo --help # error -# cargo test --help # gives 'test' sub-command usage! 
-# -# As a special case, if only the initial command has been typed, then the -# sub-commands (taken from `$cmd --list`) are added to the wordlist. -function _docopt_wordlist_commands { - if [ -z "$DOCOPT_WORDLIST_BIN" ]; then - DOCOPT_WORDLIST_BIN=/usr/local/bin/docopt-wordlist - fi - - cword=$(_get_cword) - if [ "$COMP_CWORD" = 1 ]; then - cmd="${COMP_WORDS[0]}" - wordlist=$("$cmd" --help 2>&1 | "$DOCOPT_WORDLIST_BIN") - wordlist+=" $("$cmd" --list | egrep '^ +\w' | awk '{print $1}')" - gen "$cword" "$wordlist" - else - for ((i="$COMP_CWORD"; i >= 1; i++)); do - cmd="${COMP_WORDS[@]::$i}" - wordlist=$($cmd --help 2>&1 | "$DOCOPT_WORDLIST_BIN") - if [ $? = 0 ]; then - gen "$cword" "$wordlist" - break - fi - done - fi -} - -# A helper function for running `compgen`, which is responsible for taking -# a prefix and presenting possible completions. -# -# If the current prefix starts with a `.` or a `/`, then file/directory -# completion is done. Otherwise, Docopt completion is done. If Docopt -# completion is empty, then it falls back to file/directory completion. -function gen { - cword="$1" - wordlist="$2" - if [[ "$cword" = .* || "$cword" = /* ]]; then - COMPREPLY=($(compgen -A file -- "$cword")) - else - COMPREPLY=($(compgen -W "$wordlist" -- "$cword")) - if [ -z "$COMPREPLY" ]; then - COMPREPLY=($(compgen -A file -- "$cword")) - fi - fi -} diff -Nru cargo-0.17.0/vendor/docopt-0.6.86/COPYING cargo-0.19.0/vendor/docopt-0.6.86/COPYING --- cargo-0.17.0/vendor/docopt-0.6.86/COPYING 2017-03-24 16:59:54.000000000 +0000 +++ cargo-0.19.0/vendor/docopt-0.6.86/COPYING 1970-01-01 00:00:00.000000000 +0000 @@ -1,3 +0,0 @@ -This project is dual-licensed under the Unlicense and MIT licenses. - -You may use this code under the terms of either license. 
diff -Nru cargo-0.17.0/vendor/docopt-0.6.86/ctags.rust cargo-0.19.0/vendor/docopt-0.6.86/ctags.rust --- cargo-0.17.0/vendor/docopt-0.6.86/ctags.rust 2017-03-24 16:59:54.000000000 +0000 +++ cargo-0.19.0/vendor/docopt-0.6.86/ctags.rust 1970-01-01 00:00:00.000000000 +0000 @@ -1,11 +0,0 @@ ---langdef=Rust ---langmap=Rust:.rs ---regex-Rust=/^[ \t]*(#\[[^\]]\][ \t]*)*(pub[ \t]+)?(extern[ \t]+)?("[^"]+"[ \t]+)?(unsafe[ \t]+)?fn[ \t]+([a-zA-Z0-9_]+)/\6/f,functions,function definitions/ ---regex-Rust=/^[ \t]*(pub[ \t]+)?type[ \t]+([a-zA-Z0-9_]+)/\2/T,types,type definitions/ ---regex-Rust=/^[ \t]*(pub[ \t]+)?enum[ \t]+([a-zA-Z0-9_]+)/\2/g,enum,enumeration names/ ---regex-Rust=/^[ \t]*(pub[ \t]+)?struct[ \t]+([a-zA-Z0-9_]+)/\2/s,structure names/ ---regex-Rust=/^[ \t]*(pub[ \t]+)?mod[ \t]+([a-zA-Z0-9_]+)/\2/m,modules,module names/ ---regex-Rust=/^[ \t]*(pub[ \t]+)?static[ \t]+([a-zA-Z0-9_]+)/\2/c,consts,static constants/ ---regex-Rust=/^[ \t]*(pub[ \t]+)?trait[ \t]+([a-zA-Z0-9_]+)/\2/t,traits,traits/ ---regex-Rust=/^[ \t]*(pub[ \t]+)?impl([ \t\n]+<.*>)?[ \t]+([a-zA-Z0-9_]+)/\3/i,impls,trait implementations/ ---regex-Rust=/^[ \t]*macro_rules![ \t]+([a-zA-Z0-9_]+)/\1/d,macros,macro definitions/ diff -Nru cargo-0.17.0/vendor/docopt-0.6.86/examples/cargo.rs cargo-0.19.0/vendor/docopt-0.6.86/examples/cargo.rs --- cargo-0.17.0/vendor/docopt-0.6.86/examples/cargo.rs 2017-03-24 16:59:54.000000000 +0000 +++ cargo-0.19.0/vendor/docopt-0.6.86/examples/cargo.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,51 +0,0 @@ -extern crate rustc_serialize; -extern crate docopt; - -use docopt::Docopt; - -// Write the Docopt usage string. -const USAGE: &'static str = " -Rust's package manager - -Usage: - cargo [...] 
- cargo [options] - -Options: - -h, --help Display this message - -V, --version Print version info and exit - --list List installed commands - -v, --verbose Use verbose output - -Some common cargo commands are: - build Compile the current project - clean Remove the target directory - doc Build this project's and its dependencies' documentation - new Create a new cargo project - run Build and execute src/main.rs - test Run the tests - bench Run the benchmarks - update Update dependencies listed in Cargo.lock - -See 'cargo help ' for more information on a specific command. -"; - -#[derive(Debug, RustcDecodable)] -struct Args { - arg_command: Option, - arg_args: Vec, - flag_list: bool, - flag_verbose: bool, -} - -#[derive(Debug, RustcDecodable)] -enum Command { - Build, Clean, Doc, New, Run, Test, Bench, Update, -} - -fn main() { - let args: Args = Docopt::new(USAGE) - .and_then(|d| d.options_first(true).decode()) - .unwrap_or_else(|e| e.exit()); - println!("{:?}", args); -} diff -Nru cargo-0.17.0/vendor/docopt-0.6.86/examples/cp.rs cargo-0.19.0/vendor/docopt-0.6.86/examples/cp.rs --- cargo-0.17.0/vendor/docopt-0.6.86/examples/cp.rs 2017-03-24 16:59:54.000000000 +0000 +++ cargo-0.19.0/vendor/docopt-0.6.86/examples/cp.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,28 +0,0 @@ -extern crate rustc_serialize; -extern crate docopt; - -use docopt::Docopt; - -// Write the Docopt usage string. -const USAGE: &'static str = " -Usage: cp [-a] - cp [-a] ...

- -Options: - -a, --archive Copy everything. -"; - -#[derive(Debug, RustcDecodable)] -struct Args { - arg_source: Vec, - arg_dest: String, - arg_dir: String, - flag_archive: bool, -} - -fn main() { - let args: Args = Docopt::new(USAGE) - .and_then(|d| d.decode()) - .unwrap_or_else(|e| e.exit()); - println!("{:?}", args); -} diff -Nru cargo-0.17.0/vendor/docopt-0.6.86/examples/decode.rs cargo-0.19.0/vendor/docopt-0.6.86/examples/decode.rs --- cargo-0.17.0/vendor/docopt-0.6.86/examples/decode.rs 2017-03-24 16:59:54.000000000 +0000 +++ cargo-0.19.0/vendor/docopt-0.6.86/examples/decode.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,47 +0,0 @@ -extern crate rustc_serialize; -extern crate docopt; - -use docopt::Docopt; - -const USAGE: &'static str = " -Naval Fate. - -Usage: - naval_fate.py ship new ... - naval_fate.py ship move [--speed=] - naval_fate.py ship shoot - naval_fate.py mine (set|remove) [--moored | --drifting] - naval_fate.py (-h | --help) - naval_fate.py --version - -Options: - -h --help Show this screen. - --version Show version. - --speed= Speed in knots [default: 10]. - --moored Moored (anchored) mine. - --drifting Drifting mine. -"; - -#[derive(Debug, RustcDecodable)] -struct Args { - flag_speed: isize, - flag_drifting: bool, - arg_name: Vec, - arg_x: Option, - arg_y: Option, - cmd_ship: bool, - cmd_mine: bool, -} - -fn main() { - let args: Args = Docopt::new(USAGE) - .and_then(|d| d.decode()) - .unwrap_or_else(|e| e.exit()); - println!("{:?}", args); - - println!("\nSome values:"); - println!(" Speed: {}", args.flag_speed); - println!(" Drifting? {}", args.flag_drifting); - println!(" Names: {:?}", args.arg_name); - println!(" Command 'ship' invoked? 
{:?}", args.cmd_ship); -} diff -Nru cargo-0.17.0/vendor/docopt-0.6.86/examples/hashmap.rs cargo-0.19.0/vendor/docopt-0.6.86/examples/hashmap.rs --- cargo-0.17.0/vendor/docopt-0.6.86/examples/hashmap.rs 2017-03-24 16:59:54.000000000 +0000 +++ cargo-0.19.0/vendor/docopt-0.6.86/examples/hashmap.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,39 +0,0 @@ -extern crate docopt; - -use docopt::Docopt; - -const USAGE: &'static str = " -Naval Fate. - -Usage: - naval_fate.py ship new ... - naval_fate.py ship move [--speed=] - naval_fate.py ship shoot - naval_fate.py mine (set|remove) [--moored | --drifting] - naval_fate.py (-h | --help) - naval_fate.py --version - -Options: - -h --help Show this screen. - --version Show version. - --speed= Speed in knots [default: 10]. - --moored Moored (anchored) mine. - --drifting Drifting mine. -"; - -fn main() { - let version = "1.2.3".to_owned(); - let args = Docopt::new(USAGE) - .and_then(|dopt| dopt.version(Some(version)).parse()) - .unwrap_or_else(|e| e.exit()); - println!("{:?}", args); - - // You can conveniently access values with `get_{bool,count,str,vec}` - // functions. If the key doesn't exist (or if, e.g., you use `get_str` on - // a switch), then a sensible default value is returned. - println!("\nSome values:"); - println!(" Speed: {}", args.get_str("--speed")); - println!(" Drifting? {}", args.get_bool("--drifting")); - println!(" Names: {:?}", args.get_vec("")); - println!(" Command 'ship' invoked? {:?}", args.get_bool("ship")); -} diff -Nru cargo-0.17.0/vendor/docopt-0.6.86/examples/optional_command.rs cargo-0.19.0/vendor/docopt-0.6.86/examples/optional_command.rs --- cargo-0.17.0/vendor/docopt-0.6.86/examples/optional_command.rs 2017-03-24 16:59:54.000000000 +0000 +++ cargo-0.19.0/vendor/docopt-0.6.86/examples/optional_command.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,50 +0,0 @@ -// This example shows how to implement a command with a "catch all." 
-// -// This requires writing your own impl for `Decodable` because docopt's -// decoder uses `Option` to mean "T may not be present" rather than -// "T may be present but incorrect." - -extern crate rustc_serialize; -extern crate docopt; - -use docopt::Docopt; -use rustc_serialize::{Decodable, Decoder}; - -// Write the Docopt usage string. -const USAGE: &'static str = " -Rust's package manager - -Usage: - mycli [] - -Options: - -h, --help Display this message -"; - -#[derive(Debug, RustcDecodable)] -struct Args { - arg_command: Command, -} - -impl Decodable for Command { - fn decode(d: &mut D) -> Result { - let s = try!(d.read_str()); - Ok(match &*s { - "" => Command::None, - "A" => Command::A, - "B" => Command::B, - "C" => Command::C, - s => Command::Unknown(s.to_string()), - }) - } -} - -#[derive(Debug)] -enum Command { A, B, C, Unknown(String), None } - -fn main() { - let args: Args = Docopt::new(USAGE) - .and_then(|d| d.decode()) - .unwrap_or_else(|e| e.exit()); - println!("{:?}", args); -} diff -Nru cargo-0.17.0/vendor/docopt-0.6.86/examples/verbose_multiple.rs cargo-0.19.0/vendor/docopt-0.6.86/examples/verbose_multiple.rs --- cargo-0.17.0/vendor/docopt-0.6.86/examples/verbose_multiple.rs 2017-03-24 16:59:54.000000000 +0000 +++ cargo-0.19.0/vendor/docopt-0.6.86/examples/verbose_multiple.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,36 +0,0 @@ -extern crate rustc_serialize; -extern crate docopt; - -use docopt::Docopt; - -// This shows how to implement multiple levels of verbosity. -// -// When you have multiple patterns, I think the only way to carry the -// repeated flag through all of them is to specify it for each pattern -// explicitly. -// -// This is unfortunate. -const USAGE: &'static str = " -Usage: cp [options] [-v | -vv | -vvv] - cp [options] [-v | -vv | -vvv] ... - -Options: - -a, --archive Copy everything. - -v, --verbose Show extra log output. 
-"; - -#[derive(Debug, RustcDecodable)] -struct Args { - arg_source: Vec, - arg_dest: String, - arg_dir: String, - flag_archive: bool, - flag_verbose: usize, -} - -fn main() { - let args: Args = Docopt::new(USAGE) - .and_then(|d| d.decode()) - .unwrap_or_else(|e| e.exit()); - println!("{:?}", args); -} diff -Nru cargo-0.17.0/vendor/docopt-0.6.86/.gitignore cargo-0.19.0/vendor/docopt-0.6.86/.gitignore --- cargo-0.17.0/vendor/docopt-0.6.86/.gitignore 2017-03-24 16:59:54.000000000 +0000 +++ cargo-0.19.0/vendor/docopt-0.6.86/.gitignore 1970-01-01 00:00:00.000000000 +0000 @@ -1,6 +0,0 @@ -.*.swp -doc -tags -target -scratch* -Cargo.lock diff -Nru cargo-0.17.0/vendor/docopt-0.6.86/LICENSE-MIT cargo-0.19.0/vendor/docopt-0.6.86/LICENSE-MIT --- cargo-0.17.0/vendor/docopt-0.6.86/LICENSE-MIT 2017-03-24 16:59:54.000000000 +0000 +++ cargo-0.19.0/vendor/docopt-0.6.86/LICENSE-MIT 1970-01-01 00:00:00.000000000 +0000 @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2015 Andrew Gallant - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. diff -Nru cargo-0.17.0/vendor/docopt-0.6.86/Makefile cargo-0.19.0/vendor/docopt-0.6.86/Makefile --- cargo-0.17.0/vendor/docopt-0.6.86/Makefile 2017-03-24 16:59:54.000000000 +0000 +++ cargo-0.19.0/vendor/docopt-0.6.86/Makefile 1970-01-01 00:00:00.000000000 +0000 @@ -1,18 +0,0 @@ -all: - @echo Nothing to do - -docs: $(LIB_FILES) - cargo doc - # WTF is rustdoc doing? - in-dir ./target/doc fix-perms - rscp ./target/doc/* gopher:~/www/burntsushi.net/rustdoc/ - -src/test/testcases.rs: src/test/testcases.docopt scripts/mk-testcases - ./scripts/mk-testcases ./src/test/testcases.docopt > ./src/test/testcases.rs - -ctags: - ctags --recurse --options=ctags.rust --languages=Rust - -push: - git push github master - git push origin master diff -Nru cargo-0.17.0/vendor/docopt-0.6.86/README.md cargo-0.19.0/vendor/docopt-0.6.86/README.md --- cargo-0.17.0/vendor/docopt-0.6.86/README.md 2017-03-24 16:59:54.000000000 +0000 +++ cargo-0.19.0/vendor/docopt-0.6.86/README.md 1970-01-01 00:00:00.000000000 +0000 @@ -1,343 +0,0 @@ -Docopt for Rust with automatic type based decoding (i.e., data validation). -This implementation conforms to the -[official description of Docopt](http://docopt.org/) and -[passes its test suite](https://github.com/docopt/docopt/pull/201). - -[![Build status](https://api.travis-ci.org/docopt/docopt.rs.svg)](https://travis-ci.org/docopt/docopt.rs) -[![](http://meritbadge.herokuapp.com/docopt)](https://crates.io/crates/docopt) - -Dual-licensed under MIT or the [UNLICENSE](http://unlicense.org). - - -### Current status - -Fully functional but the design of the API is up for debate. **I am seeking -feedback**. 
- - -### Documentation - -[http://burntsushi.net/rustdoc/docopt](http://burntsushi.net/rustdoc/docopt/index.html) - - -### Installation - -This crate is fully compatible with Cargo. Just add it to your `Cargo.toml`: - -```toml -[dependencies] -docopt = "0.6" -rustc-serialize = "0.3" # if you're using `derive(RustcDecodable)` -``` - -If you want to use the macro, then add `docopt_macros = "0.6"` instead. -Note that the **`docopt!` macro only works on a nightly Rust compiler** because -it is a compiler plugin. - - -### Quick example - -Here is a full working example. Notice that you can specify the types of each -of the named values in the Docopt usage string. Values will be automatically -converted to those types (or an error will be reported). - -```rust -extern crate rustc_serialize; -extern crate docopt; - -use docopt::Docopt; - -const USAGE: &'static str = " -Naval Fate. - -Usage: - naval_fate.py ship new ... - naval_fate.py ship move [--speed=] - naval_fate.py ship shoot - naval_fate.py mine (set|remove) [--moored | --drifting] - naval_fate.py (-h | --help) - naval_fate.py --version - -Options: - -h --help Show this screen. - --version Show version. - --speed= Speed in knots [default: 10]. - --moored Moored (anchored) mine. - --drifting Drifting mine. -"; - -#[derive(Debug, RustcDecodable)] -struct Args { - flag_speed: isize, - flag_drifting: bool, - arg_name: Vec, - arg_x: Option, - arg_y: Option, - cmd_ship: bool, - cmd_mine: bool, -} - -fn main() { - let args: Args = Docopt::new(USAGE) - .and_then(|d| d.decode()) - .unwrap_or_else(|e| e.exit()); - println!("{:?}", args); -} -``` - -Here is the same example, but with the use of the `docopt!` macro, which will -*generate a struct for you*. Note that this uses a compiler plugin, so it only -works on a **nightly Rust compiler**: - -```rust -#![feature(plugin)] -#![plugin(docopt_macros)] - -extern crate rustc_serialize; -extern crate docopt; - -use docopt::Docopt; - -docopt!(Args derive Debug, " -Naval Fate. 
- -Usage: - naval_fate.py ship new ... - naval_fate.py ship move [--speed=] - naval_fate.py ship shoot - naval_fate.py mine (set|remove) [--moored | --drifting] - naval_fate.py (-h | --help) - naval_fate.py --version - -Options: - -h --help Show this screen. - --version Show version. - --speed= Speed in knots [default: 10]. - --moored Moored (anchored) mine. - --drifting Drifting mine. -"); - -fn main() { - let args: Args = Args::docopt().decode().unwrap_or_else(|e| e.exit()); - println!("{:?}", args); -} -``` - -The `Args` struct has one static method defined for it: `docopt`. The method -returns a normal `Docopt` value, which can be used to set configuration -options, `argv` and parse or decode command line arguments. - - -### Struct field name mapping - -The field names of the struct map like this: - -``` --g => flag_g ---group => flag_group ---group => flag_group -FILE => arg_FILE - => arg_file -build => cmd_build -``` - - -### Data validation example - -Here's another example that shows how to specify the types of your arguments: - -```rust -#![feature(plugin)] -#![plugin(docopt_macros)] - -extern crate rustc_serialize; - -extern crate docopt; - -docopt!(Args, "Usage: add ", arg_x: i32, arg_y: i32); - -fn main() { - let args: Args = Args::docopt().decode().unwrap_or_else(|e| e.exit()); - println!("x: {}, y: {}", args.arg_x, args.arg_y); -} -``` - -In this example, specific type annotations were added. They will be -automatically inserted into the generated struct. You can override as many (or -as few) fields as you want. If you don't specify a type, then one of `bool`, -`u64`, `String` or `Vec` will be chosen depending on the type of -argument. In this case, both `arg_x` and `arg_y` would have been `String`. - -If any value cannot be decoded into a value with the right type, then an error -will be shown to the user. - -And of course, you don't need the macro to do this. You can do the same thing -with a manually written struct too. 
- - -### Modeling `rustc` - -Here's a selected subset for some of `rustc`'s options. This also shows how to -restrict values to a list of choices via an `enum` type and demonstrates more -Docopt features. - -```rust -#![feature(plugin)] -#![plugin(docopt_macros)] - -extern crate rustc_serialize; - -extern crate docopt; - -docopt!(Args derive Debug, " -Usage: rustc [options] [--cfg SPEC... -L PATH...] INPUT - rustc (--help | --version) - -Options: - -h, --help Show this message. - --version Show the version of rustc. - --cfg SPEC Configure the compilation environment. - -L PATH Add a directory to the library search path. - --emit TYPE Configure the output that rustc will produce. - Valid values: asm, ir, bc, obj, link. - --opt-level LEVEL Optimize with possible levels 0-3. -", flag_opt_level: Option, flag_emit: Option); - -#[derive(RustcDecodable, Debug)] -enum Emit { Asm, Ir, Bc, Obj, Link } - -#[derive(Debug)] -enum OptLevel { Zero, One, Two, Three } - -impl rustc_serialize::Decodable for OptLevel { - fn decode(d: &mut D) -> Result { - Ok(match try!(d.read_usize()) { - 0 => OptLevel::Zero, 1 => OptLevel::One, - 2 => OptLevel::Two, 3 => OptLevel::Three, - n => { - let err = format!("Could not decode '{}' as opt-level.", n); - return Err(d.error(&*err)); - } - }) - } -} - -fn main() { - let args: Args = Args::docopt().decode().unwrap_or_else(|e| e.exit()); - println!("{:?}", args); -} -``` - -### Viewing the generated struct - -Generating a struct is pretty magical, but if you want, you can look at it by -expanding all macros. Say you wrote the above example for `Usage: add ` -into a file called `add.rs`. Then running: - -```bash -rustc -L path/containing/docopt/lib -Z unstable-options --pretty=expanded add.rs -``` - -Will show all macros expanded. The `path/containing/docopt/lib` is usually -`target/debug/deps` or `target/release/deps` in a cargo project. 
In the generated code, you should be -able to find the generated struct: - -```rust -struct Args { - pub arg_x: int, - pub arg_y: int, -} -``` - - -### Traditional Docopt API - -The reference implementation of Docopt returns a Python dictionary with names -like `` or `--flag`. If you prefer this access pattern, then you can use -`docopt::ArgvMap`. The disadvantage is that you have to do all of your type -conversion manually. Here's the canonical Docopt example with a hash table: - -```rust -extern crate docopt; - -use docopt::Docopt; - -const USAGE: &'static str = " -Naval Fate. - -Usage: - naval_fate.py ship new ... - naval_fate.py ship move [--speed=] - naval_fate.py ship shoot - naval_fate.py mine (set|remove) [--moored | --drifting] - naval_fate.py (-h | --help) - naval_fate.py --version - -Options: - -h --help Show this screen. - --version Show version. - --speed= Speed in knots [default: 10]. - --moored Moored (anchored) mine. - --drifting Drifting mine. -"; - -fn main() { - let args = Docopt::new(USAGE) - .and_then(|dopt| dopt.parse()) - .unwrap_or_else(|e| e.exit()); - println!("{:?}", args); - - // You can conveniently access values with `get_{bool,count,str,vec}` - // functions. If the key doesn't exist (or if, e.g., you use `get_str` on - // a switch), then a sensible default value is returned. - println!("\nSome values:"); - println!(" Speed: {}", args.get_str("--speed")); - println!(" Drifting? {}", args.get_bool("--drifting")); - println!(" Names: {:?}", args.get_vec("")); -} -``` - -### Tab completion support - -This particular implementation bundles a command called `docopt-wordlist` that -can be used to automate tab completion. This repository also collects some -basic completion support for various shells (currently only bash) in the -`completions` directory. - -You can use them to setup tab completion on your system. It should work with -any program that uses Docopt (or rather, any program that outputs usage -messages that look like Docopt). 
For example, to get tab completion support for -Cargo, you'll have to install `docopt-wordlist` and add some voodoo to your -`$HOME/.bash_completion` file (this may vary for other shells). - -Here it is step by step: - -```bash -# Download and build `docopt-wordlist` (as part of the Docopt package) -$ git clone git://github.com/docopt/docopt.rs -$ cd docopt.rs -$ cargo build --release - -# Now setup tab completion (for bash) -$ echo "DOCOPT_WORDLIST_BIN=\"$(pwd)/target/release/docopt-wordlist\"" >> $HOME/.bash_completion -$ echo "source \"$(pwd)/completions/docopt-wordlist.bash\"" >> $HOME/.bash_completion -$ echo "complete -F _docopt_wordlist_commands cargo" >> $HOME/.bash_completion -``` - -My [CSV toolkit](https://github.com/BurntSushi/xsv) is supported too: - -```bash -# shameless plug... -$ echo "complete -F _docopt_wordlist_commands xsv" >> $HOME/.bash_completion -``` - -Note that this is emphatically a first pass. There are several improvements -that I'd like to make: - -1. Take context into account when completing. For example, it should be - possible to only show completions that can lead to a valid Docopt match. - This may be hard. (i.e., It may require restructuring Docopt's internals.) -2. Support more shells. (I'll happily accept pull requests on this one. I doubt - I'll venture outside of bash any time soon.) -3. Make tab completion support more seamless. The way it works right now is - pretty hacky by intermingling file/directory completion. 
diff -Nru cargo-0.17.0/vendor/docopt-0.6.86/scripts/mk-testcases cargo-0.19.0/vendor/docopt-0.6.86/scripts/mk-testcases --- cargo-0.17.0/vendor/docopt-0.6.86/scripts/mk-testcases 2017-03-24 16:59:54.000000000 +0000 +++ cargo-0.19.0/vendor/docopt-0.6.86/scripts/mk-testcases 1970-01-01 00:00:00.000000000 +0000 @@ -1,80 +0,0 @@ -#!/usr/bin/env python2 - -from __future__ import absolute_import, division, print_function -import argparse -import json -import re - -retests = re.compile('(.*?)"""(.*?)(r"""|\s*$)', re.DOTALL) -reinvokes = re.compile('(.+?$)(.+?)\s*(\$|\Z)', re.DOTALL | re.MULTILINE) - -p = argparse.ArgumentParser( - description="Outputs src/test/testcases.rs to stdout") -p.add_argument("testcases", metavar="FILE", - help="The testcases.docopt language agnostic test suite.") -args = p.parse_args() - -with open(args.testcases) as f: - alltests = f.read() - -alltests = re.sub('^r"""', '', alltests) -alltests = re.sub('^\s*#.*$', '', alltests, flags=re.MULTILINE) - -tests = [] # [{usage, args, expect}] (expect is None ==> user-error) -for m in retests.finditer(alltests): - usage, invokes = m.group(1).strip(), m.group(2).strip() - assert invokes.startswith('$'), 'Bad test: "%s"' % invokes - invokes = re.sub('^\$', '', invokes) - - for mi in reinvokes.finditer(invokes): - invoke, expect = mi.group(1).strip(), mi.group(2).strip() - err = expect.startswith('"user-error"') - tests.append({ - 'usage': usage, - 'args': invoke.split()[1:], - 'expect': None if err else json.loads(expect), - }) - - -def show_test(i, t): - def show_expect(e): - kvs = [] - for k, v in e.iteritems(): - kvs.append('("%s", %s)' % (k, show_value(v))) - return ', '.join(kvs) - def show_value(v): - if v is None: - return 'Plain(None)' - elif isinstance(v, basestring): - return 'Plain(Some("%s".to_string()))' % v - elif isinstance(v, bool): - return 'Switch(%s)' % ('true' if v else 'false') - elif isinstance(v, int): - return 'Counted(%d)' % v - elif isinstance(v, list): - elms = ', 
'.join(['"%s".to_string()' % el for el in v]) - return 'List(vec!(%s))' % elms - else: - raise ValueError('Unrecognized value: "%s" (type: %s)' - % (v, type(v))) - - args = ', '.join(['"%s"' % arg for arg in t['args']]) - if t['expect'] is None: - return 'test_user_error!(test_%d_testcases, "%s", &[%s]);' \ - % (i, t['usage'], args) - else: - expect = show_expect(t['expect']) - return 'test_expect!(test_%d_testcases, "%s", &[%s], vec!(%s));' \ - % (i, t['usage'], args, expect) - -print( -"""// !!! ATTENTION !!! -// This file is automatically generated by `scripts/mk-testcases`. -// Please do not edit this file directly! - -use Value::{{Switch, Counted, Plain, List}}; -use test::{{get_args, map_from_alist, same_args}}; - -{tests} -""".format(tests='\n\n'.join([show_test(i, t) for i, t in enumerate(tests)]))) - diff -Nru cargo-0.17.0/vendor/docopt-0.6.86/session.vim cargo-0.19.0/vendor/docopt-0.6.86/session.vim --- cargo-0.17.0/vendor/docopt-0.6.86/session.vim 2017-03-24 16:59:54.000000000 +0000 +++ cargo-0.19.0/vendor/docopt-0.6.86/session.vim 1970-01-01 00:00:00.000000000 +0000 @@ -1,3 +0,0 @@ -au BufWritePost *.rs silent!make ctags > /dev/null 2>&1 -" let g:syntastic_rust_rustc_fname = "src/lib.rs" -" let g:syntastic_rust_rustc_args = "--no-trans" diff -Nru cargo-0.17.0/vendor/docopt-0.6.86/src/dopt.rs cargo-0.19.0/vendor/docopt-0.6.86/src/dopt.rs --- cargo-0.17.0/vendor/docopt-0.6.86/src/dopt.rs 2017-03-24 16:59:54.000000000 +0000 +++ cargo-0.19.0/vendor/docopt-0.6.86/src/dopt.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,895 +0,0 @@ -use std::collections::HashMap; -use std::error::Error as StdError; -use std::fmt::{self, Debug}; -use std::io::{self, Write}; -use std::str::FromStr; - -use regex::{Captures, Regex}; -use rustc_serialize::Decodable; - -use parse::Parser; -use synonym::SynonymMap; - -use self::Value::{Switch, Counted, Plain, List}; -use self::Error::{Usage, Argv, NoMatch, Decode, WithProgramUsage, Help, Version}; - -/// Represents the different types 
of Docopt errors. -/// -/// This error type has a lot of variants. In the common case, you probably -/// don't care why Docopt has failed, and would rather just quit the program -/// and show an error message instead. The `exit` method defined on the `Error` -/// type will do just that. It will also set the exit code appropriately -/// (no error for `--help` or `--version`, but an error code for bad usage, -/// bad argv, no match or bad decode). -/// -/// ### Example -/// -/// Generally, you want to parse the usage string, try to match the argv -/// and then quit the program if there was an error reported at any point -/// in that process. This can be achieved like so: -/// -/// ```no_run -/// use docopt::Docopt; -/// -/// const USAGE: &'static str = " -/// Usage: ... -/// "; -/// -/// let args = Docopt::new(USAGE) -/// .and_then(|d| d.parse()) -/// .unwrap_or_else(|e| e.exit()); -/// ``` -#[derive(Debug)] -pub enum Error { - /// Parsing the usage string failed. - /// - /// This error can only be triggered by the programmer, i.e., the writer - /// of the Docopt usage string. This error is usually indicative of a bug - /// in your program. - Usage(String), - - /// Parsing the argv specified failed. - /// - /// The payload is a string describing why the arguments provided could not - /// be parsed. - /// - /// This is distinct from `NoMatch` because it will catch errors like - /// using flags that aren't defined in the usage string. - Argv(String), - - /// The given argv parsed successfully, but it did not match any example - /// usage of the program. - /// - /// Regrettably, there is no descriptive message describing *why* the - /// given argv didn't match any of the usage strings. - NoMatch, - - /// This indicates a problem decoding a successful argv match into a - /// decodable value. - Decode(String), - - /// Parsing failed, and the program usage should be printed next to the - /// failure message. Typically this wraps `Argv` and `NoMatch` errors. 
- WithProgramUsage(Box, String), - - /// Decoding or parsing failed because the command line specified that the - /// help message should be printed. - Help, - - /// Decoding or parsing failed because the command line specified that the - /// version should be printed - /// - /// The version is included as a payload to this variant. - Version(String), -} - -impl Error { - /// Return whether this was a fatal error or not. - /// - /// Non-fatal errors include requests to print the help or version - /// information of a program, while fatal errors include those such as - /// failing to decode or parse. - pub fn fatal(&self) -> bool { - match *self { - Help | Version(..) => false, - Usage(..) | Argv(..) | NoMatch | Decode(..) => true, - WithProgramUsage(ref b, _) => b.fatal(), - } - } - - /// Print this error and immediately exit the program. - /// - /// If the error is non-fatal (e.g., `Help` or `Version`), then the - /// error is printed to stdout and the exit status will be `0`. Otherwise, - /// when the error is fatal, the error is printed to stderr and the - /// exit status will be `1`. - pub fn exit(&self) -> ! { - if self.fatal() { - werr!("{}\n", self); - ::std::process::exit(1) - } else { - let _ = writeln!(&mut io::stdout(), "{}", self); - ::std::process::exit(0) - } - } -} - -impl fmt::Display for Error { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match *self { - WithProgramUsage(ref other, ref usage) => { - let other = other.to_string(); - if other.is_empty() { - write!(f, "{}", usage) - } else { - write!(f, "{}\n\n{}", other, usage) - } - } - Help => write!(f, ""), - NoMatch => write!(f, "Invalid arguments."), - Usage(ref s) | Argv(ref s) | Decode(ref s) | Version(ref s) => { - write!(f, "{}", s) - } - } - } -} - -impl StdError for Error { - fn description(&self) -> &str { - match *self { - Usage(..) => "invalid usage string", - Argv(..) => "failed to parse specified argv", - NoMatch => "could not match specified argv", - Decode(..) 
=> "failed to decode", - WithProgramUsage(..) => "failed to parse specified argv", - Help => "help message requested", - Version(..) => "version message requested", - } - } - - fn cause(&self) -> Option<&StdError> { - match *self { - WithProgramUsage(ref cause, _) => Some(&**cause), - _ => None, - } - } -} - -/// The main Docopt type, which is constructed with a Docopt usage string. -/// -/// This can be used to match command line arguments to produce a `ArgvMap`. -#[derive(Clone, Debug)] -pub struct Docopt { - p: Parser, - argv: Option>, - options_first: bool, - help: bool, - version: Option, -} - -impl Docopt { - /// Parse the Docopt usage string given. - /// - /// The `Docopt` value returned may be used immediately to parse command - /// line arguments with a default configuration. - /// - /// If there was a problem parsing the usage string, a `Usage` error - /// is returned. - pub fn new(usage: S) -> Result - where S: ::std::ops::Deref { - Parser::new(usage.deref()) - .map_err(Usage) - .map(|p| Docopt { - p: p, - argv: None, - options_first: false, - help: true, - version: None, - }) - } - - /// Parse and decode the given argv. - /// - /// This is a convenience method for - /// `parse().and_then(|vals| vals.decode())`. - /// - /// For details on how decoding works, please see the documentation for - /// `ArgvMap`. - pub fn decode(&self) -> Result where D: Decodable { - self.parse().and_then(|vals| vals.decode()) - } - - /// Parse command line arguments and try to match them against a usage - /// pattern specified in the Docopt string. - /// - /// If there is a match, then an `ArgvMap` is returned, which maps - /// flags, commands and arguments to values. - /// - /// If parsing the command line arguments fails, then an `Argv` error is - /// returned. If parsing succeeds but there is no match, then a `NoMatch` - /// error is returned. Both of these errors are always returned inside a - /// `WithProgramUsage` error. 
- /// - /// If special handling of `help` or `version` is enabled (the former is - /// enabled by default), then `Help` or `Version` errors are returned - /// if `--help` or `--version` is present. - pub fn parse(&self) -> Result { - let argv = self.argv.clone().unwrap_or_else(Docopt::get_argv); - let vals = try!( - self.p.parse_argv(argv, self.options_first) - .map_err(|s| self.err_with_usage(Argv(s))) - .and_then(|argv| - match self.p.matches(&argv) { - Some(m) => Ok(ArgvMap { map: m }), - None => Err(self.err_with_usage(NoMatch)), - })); - if self.help && vals.get_bool("--help") { - return Err(self.err_with_full_doc(Help)); - } - match self.version { - Some(ref v) if vals.get_bool("--version") => { - return Err(Version(v.clone())) - } - _ => {}, - } - Ok(vals) - } - - /// Set the argv to be used for Docopt parsing. - /// - /// By default, when no argv is set, and it is automatically taken from - /// `std::env::args()`. - /// - /// The `argv` given *must* be the full set of `argv` passed to the - /// program. e.g., `["cp", "src", "dest"]` is right while `["src", "dest"]` - /// is wrong. - pub fn argv(mut self, argv: I) -> Docopt - where I: IntoIterator, S: AsRef { - self.argv = Some( - argv.into_iter().skip(1).map(|s| s.as_ref().to_owned()).collect() - ); - self - } - - /// Enables the "options first" Docopt behavior. - /// - /// The options first behavior means that all flags *must* appear before - /// position arguments. That is, after the first position argument is - /// seen, all proceeding arguments are interpreted as positional - /// arguments unconditionally. - pub fn options_first(mut self, yes: bool) -> Docopt { - self.options_first = yes; - self - } - - /// Enables automatic handling of `--help`. - /// - /// When this is enabled and `--help` appears anywhere in the arguments, - /// then a `Help` error will be returned. 
You may then use the `exit` - /// method on the error value to conveniently quit the program (which will - /// print the full usage string to stdout). - /// - /// Note that for this to work, `--help` must be a valid pattern. - /// - /// When disabled, there is no special handling of `--help`. - pub fn help(mut self, yes: bool) -> Docopt { - self.help = yes; - self - } - - /// Enables automatic handling of `--version`. - /// - /// When this is enabled and `--version` appears anywhere in the arguments, - /// then a `Version(s)` error will be returned, where `s` is the string - /// given here. You may then use the `exit` method on the error value to - /// convenient quit the program (which will print the version to stdout). - /// - /// When disabled (a `None` value), there is no special handling of - /// `--version`. - pub fn version(mut self, version: Option) -> Docopt { - self.version = version; - self - } - - #[doc(hidden)] - // Exposed for use in `docopt_macros`. - pub fn parser(&self) -> &Parser { - &self.p - } - - fn err_with_usage(&self, e: Error) -> Error { - WithProgramUsage( - Box::new(e), self.p.usage.trim().into()) - } - - fn err_with_full_doc(&self, e: Error) -> Error { - WithProgramUsage( - Box::new(e), self.p.full_doc.trim().into()) - } - - fn get_argv() -> Vec { - // Hmm, we should probably handle a Unicode decode error here... ---AG - ::std::env::args().skip(1).collect() - } -} - -/// A map containing matched values from command line arguments. -/// -/// The keys are just as specified in Docopt: `--flag` for a long flag or -/// `-f` for a short flag. (If `-f` is a synonym for `--flag`, then either -/// key will work.) `ARG` or `` specify a positional argument and `cmd` -/// specifies a command. -#[derive(Clone)] -pub struct ArgvMap { - #[doc(hidden)] - pub map: SynonymMap, -} - -impl ArgvMap { - /// Tries to decode the map of values into a struct. - /// - /// This method should always be called to decode a `ArgvMap` into - /// a struct. 
All fields of the struct must map to a corresponding key - /// in the `ArgvMap`. To this end, each member must have a special prefix - /// corresponding to the different kinds of patterns in Docopt. There are - /// three prefixes: `flag_`, `arg_` and `cmd_` which respectively - /// correspond to short/long flags, positional arguments and commands. - /// - /// If a Docopt item has a `-` in its name, then it is converted to an `_`. - /// - /// # Example - /// - /// ```rust - /// # extern crate docopt; - /// # extern crate rustc_serialize; - /// # fn main() { - /// use docopt::Docopt; - /// - /// const USAGE: &'static str = " - /// Usage: cargo [options] (build | test) - /// cargo --help - /// - /// Options: -v, --verbose - /// -h, --help - /// "; - /// - /// #[derive(RustcDecodable)] - /// struct Args { - /// cmd_build: bool, - /// cmd_test: bool, - /// flag_verbose: bool, - /// flag_h: bool, - /// } - /// - /// let argv = || vec!["cargo", "build", "-v"].into_iter(); - /// let args: Args = Docopt::new(USAGE) - /// .and_then(|d| d.argv(argv()).decode()) - /// .unwrap_or_else(|e| e.exit()); - /// assert!(args.cmd_build && !args.cmd_test - /// && args.flag_verbose && !args.flag_h); - /// # } - /// ``` - /// - /// Note that in the above example, `flag_h` is used but `flag_help` - /// could also be used. (In fact, both could be used at the same time.) - /// - /// In this example, only the `bool` type was used, but any type satisfying - /// the `Decodable` trait is valid. - pub fn decode(self) -> Result { - Decodable::decode(&mut Decoder { vals: self, stack: vec!() }) - } - - /// Finds the value corresponding to `key` and calls `as_bool()` on it. - /// If the key does not exist, `false` is returned. - pub fn get_bool(&self, key: &str) -> bool { - self.find(key).map_or(false, |v| v.as_bool()) - } - - /// Finds the value corresponding to `key` and calls `as_count()` on it. - /// If the key does not exist, `0` is returned. 
- pub fn get_count(&self, key: &str) -> u64 { - self.find(key).map_or(0, |v| v.as_count()) - } - - /// Finds the value corresponding to `key` and calls `as_str()` on it. - /// If the key does not exist, `""` is returned. - pub fn get_str(&self, key: &str) -> &str { - self.find(key).map_or("", |v| v.as_str()) - } - - /// Finds the value corresponding to `key` and calls `as_vec()` on it. - /// If the key does not exist, `vec!()` is returned. - pub fn get_vec(&self, key: &str) -> Vec<&str> { - self.find(key).map(|v| v.as_vec()).unwrap_or(vec!()) - } - - /// Return the raw value corresponding to some `key`. - /// - /// `key` should be a string in the traditional Docopt format. e.g., - /// `` or `--flag`. - pub fn find(&self, key: &str) -> Option<&Value> { - self.map.find(&key.into()) - } - - /// Return the number of values, not including synonyms. - pub fn len(&self) -> usize { - self.map.len() - } - - /// Converts a Docopt key to a struct field name. - /// This makes a half-hearted attempt at making the key a valid struct - /// field name (like replacing `-` with `_`), but it does not otherwise - /// guarantee that the result is a valid struct field name. - #[doc(hidden)] - pub fn key_to_struct_field(name: &str) -> String { - lazy_static! 
{ - static ref RE: Regex = regex!( - r"^(?:--?(?P\S+)|(?:(?P\p{Lu}+)|<(?P[^>]+)>)|(?P\S+))$" - ); - } - fn sanitize(name: &str) -> String { - name.replace("-", "_") - } - - RE.replace(name, |cap: &Captures| { - let (flag, cmd) = ( - cap.name("flag").unwrap_or(""), - cap.name("cmd").unwrap_or(""), - ); - let (argu, argb) = ( - cap.name("argu").unwrap_or(""), - cap.name("argb").unwrap_or(""), - ); - let (prefix, name) = - if !flag.is_empty() { - ("flag_", flag) - } else if !argu.is_empty() { - ("arg_", argu) - } else if !argb.is_empty() { - ("arg_", argb) - } else if !cmd.is_empty() { - ("cmd_", cmd) - } else { - panic!("Unknown ArgvMap key: '{}'", name) - }; - let mut prefix = prefix.to_owned(); - prefix.push_str(&sanitize(name)); - prefix - }) - } - - /// Converts a struct field name to a Docopt key. - #[doc(hidden)] - pub fn struct_field_to_key(field: &str) -> String { - lazy_static! { - static ref FLAG: Regex = regex!(r"^flag_"); - static ref ARG: Regex = regex!(r"^arg_"); - static ref LETTERS: Regex = regex!(r"^\p{Lu}+$"); - static ref CMD: Regex = regex!(r"^cmd_"); - } - fn desanitize(name: &str) -> String { - name.replace("_", "-") - } - let name = - if field.starts_with("flag_") { - let name = FLAG.replace(field, ""); - let mut pre_name = (if name.len() == 1 { "-" } else { "--" }) - .to_owned(); - pre_name.push_str(&*name); - pre_name - } else if field.starts_with("arg_") { - let name = ARG.replace(field, ""); - if LETTERS.is_match(&name) { - name - } else { - let mut pre_name = "<".to_owned(); - pre_name.push_str(&*name); - pre_name.push('>'); - pre_name - } - } else if field.starts_with("cmd_") { - CMD.replace(field, "") - } else { - panic!("Unrecognized struct field: '{}'", field) - }; - desanitize(&*name) - } -} - -impl fmt::Debug for ArgvMap { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - if self.len() == 0 { - return write!(f, "{{EMPTY}}"); - } - - // This is a little crazy, but we want to group synonyms with - // their keys and sort them 
for predictable output. - let reverse: HashMap<&String, &String> = - self.map.synonyms().map(|(from, to)| (to, from)).collect(); - let mut keys: Vec<&String> = self.map.keys().collect(); - keys.sort(); - let mut first = true; - for &k in &keys { - if !first { try!(write!(f, "\n")); } else { first = false; } - match reverse.get(&k) { - None => { - try!(write!(f, "{} => {:?}", k, self.map.get(k))) - } - Some(s) => { - try!(write!(f, "{}, {} => {:?}", s, k, self.map.get(k))) - } - } - } - Ok(()) - } -} - -/// A matched command line value. -/// -/// The value can be a boolean, counted repetition, a plain string or a list -/// of strings. -/// -/// The various `as_{bool,count,str,vec}` methods provide convenient access -/// to values without destructuring manually. -#[derive(Clone, Debug, PartialEq)] -pub enum Value { - /// A boolean value from a flag that has no argument. - /// - /// The presence of a flag means `true` and the absence of a flag - /// means `false`. - Switch(bool), - - /// The number of occurrences of a repeated flag. - Counted(u64), - - /// A positional or flag argument. - /// - /// This is `None` when the positional argument or flag is not present. - /// Note that it is possible to have `Some("")` for a present but empty - /// argument. - Plain(Option), - - /// A List of positional or flag arguments. - /// - /// This list may be empty when no arguments or flags are present. - List(Vec), -} - -impl Value { - /// Returns the value as a bool. - /// - /// Counted repetitions are `false` if `0` and `true` otherwise. - /// Plain strings are `true` if present and `false` otherwise. - /// Lists are `true` if non-empty and `false` otherwise. - pub fn as_bool(&self) -> bool { - match *self { - Switch(b) => b, - Counted(n) => n > 0, - Plain(None) => false, - Plain(Some(_)) => true, - List(ref vs) => !vs.is_empty(), - } - } - - /// Returns the value as a count of the number of times it occurred. - /// - /// Booleans are `1` if `true` and `0` otherwise. 
- /// Plain strings are `1` if present and `0` otherwise. - /// Lists correspond to its length. - pub fn as_count(&self) -> u64 { - match *self { - Switch(b) => if b { 1 } else { 0 }, - Counted(n) => n, - Plain(None) => 0, - Plain(Some(_)) => 1, - List(ref vs) => vs.len() as u64, - } - } - - /// Returns the value as a string. - /// - /// All values return an empty string except for a non-empty plain string. - pub fn as_str(&self) -> &str { - match *self { - Switch(_) | Counted(_) | Plain(None) | List(_) => "", - Plain(Some(ref s)) => &**s, - } - } - - /// Returns the value as a list of strings. - /// - /// Booleans, repetitions and empty strings correspond to an empty list. - /// Plain strings correspond to a list of length `1`. - pub fn as_vec(&self) -> Vec<&str> { - match *self { - Switch(_) | Counted(_) | Plain(None) => vec![], - Plain(Some(ref s)) => vec![&**s], - List(ref vs) => vs.iter().map(|s| &**s).collect(), - } - } -} - -/// Decoder for `ArgvMap` into your own `Decodable` types. -/// -/// In general, you shouldn't have to use this type directly. It is exposed -/// in case you want to write a generic function that produces a decodable -/// value. For example, here's a function that takes a usage string, an argv -/// and produces a decodable value: -/// -/// ```rust -/// # extern crate docopt; -/// # extern crate rustc_serialize; -/// # fn main() { -/// use docopt::Docopt; -/// use rustc_serialize::Decodable; -/// -/// fn decode(usage: &str, argv: &[&str]) -/// -> Result { -/// Docopt::new(usage) -/// .and_then(|d| d.argv(argv.iter().cloned()).decode()) -/// } -/// # } -pub struct Decoder { - vals: ArgvMap, - stack: Vec, -} - -#[derive(Debug)] -struct DecoderItem { - key: String, - struct_field: String, - val: Option, -} - -macro_rules! 
derr( - ($($arg:tt)*) => (return Err(Decode(format!($($arg)*)))) -); - -impl Decoder { - fn push(&mut self, struct_field: &str) { - let key = ArgvMap::struct_field_to_key(struct_field); - self.stack.push(DecoderItem { - key: key.clone(), - struct_field: struct_field.into(), - val: self.vals.find(&*key).cloned(), - }); - } - - fn pop(&mut self) -> Result { - match self.stack.pop() { - None => derr!("Could not decode value into unknown key."), - Some(it) => Ok(it) - } - } - - fn pop_key_val(&mut self) -> Result<(String, Value), Error> { - let it = try!(self.pop()); - match it.val { - None => derr!( - "Could not find argument '{}' (from struct field '{}'). -Note that each struct field must have the right key prefix, which must -be one of `cmd_`, `flag_` or `arg_`.", - it.key, it.struct_field), - Some(v) => Ok((it.key, v)) - } - } - - fn pop_val(&mut self) -> Result { - let (_, v) = try!(self.pop_key_val()); - Ok(v) - } - - fn to_number(&mut self, expect: &str) -> Result - where T: FromStr + ToString, ::Err: Debug { - let (k, v) = try!(self.pop_key_val()); - match v { - Counted(n) => Ok(n.to_string().parse().unwrap()), // lol - _ => { - if v.as_str().trim().is_empty() { - Ok("0".parse().unwrap()) // lol - } else { - match v.as_str().parse() { - Err(_) => { - derr!("Could not decode '{}' to {} for '{}'.", - v.as_str(), expect, k) - } - Ok(v) => Ok(v), - } - } - } - } - } - - fn to_float(&mut self, expect: &str) -> Result { - let (k, v) = try!(self.pop_key_val()); - match v { - Counted(n) => Ok(n as f64), - _ => { - match v.as_str().parse() { - Err(_) => derr!("Could not decode '{}' to {} for '{}'.", - v.as_str(), expect, k), - Ok(v) => Ok(v), - } - } - } - } -} - -macro_rules! 
read_num { - ($name:ident, $ty:ty) => ( - fn $name(&mut self) -> Result<$ty, Error> { - self.to_number::<$ty>(stringify!($ty)).map(|n| n as $ty) - } - ); -} - -impl ::rustc_serialize::Decoder for Decoder { - type Error = Error; - - fn error(&mut self, err: &str) -> Error { - Decode(err.into()) - } - - fn read_nil(&mut self) -> Result<(), Error> { - // I don't know what the right thing is here, so just fail for now. - panic!("I don't know how to read into a nil value.") - } - - read_num!(read_usize, usize); - read_num!(read_u64, u64); - read_num!(read_u32, u32); - read_num!(read_u16, u16); - read_num!(read_u8, u8); - read_num!(read_isize, isize); - read_num!(read_i64, i64); - read_num!(read_i32, i32); - read_num!(read_i16, i16); - read_num!(read_i8, i8); - - fn read_bool(&mut self) -> Result { - self.pop_val().map(|v| v.as_bool()) - } - - fn read_f64(&mut self) -> Result { - self.to_float("f64") - } - - fn read_f32(&mut self) -> Result { - self.to_float("f32").map(|n| n as f32) - } - - fn read_char(&mut self) -> Result { - let (k, v) = try!(self.pop_key_val()); - let vstr = v.as_str(); - match vstr.chars().count() { - 1 => Ok(vstr.chars().next().unwrap()), - _ => derr!("Could not decode '{}' into char for '{}'.", vstr, k), - } - } - - fn read_str(&mut self) -> Result { - self.pop_val().map(|v| v.as_str().into()) - } - - fn read_enum(&mut self, _: &str, f: F) -> Result - where F: FnOnce(&mut Decoder) -> Result { - f(self) - } - - fn read_enum_variant(&mut self, names: &[&str], mut f: F) - -> Result - where F: FnMut(&mut Decoder, usize) -> Result { - let v = to_lowercase(try!(self.pop_val()).as_str()); - let i = - match names.iter().map(|&n| to_lowercase(n)).position(|n| n == v) { - Some(i) => i, - None => { - derr!("Could not match '{}' with any of \ - the allowed variants: {:?}", v, names) - } - }; - f(self, i) - } - - fn read_enum_variant_arg(&mut self, _: usize, _: F) - -> Result - where F: FnOnce(&mut Decoder) -> Result { - unimplemented!() - } - - fn 
read_enum_struct_variant(&mut self, _: &[&str], _: F) - -> Result - where F: FnMut(&mut Decoder, usize) -> Result { - unimplemented!() - } - - fn read_enum_struct_variant_field(&mut self, _: &str, _: usize, _: F) - -> Result - where F: FnOnce(&mut Decoder) -> Result { - unimplemented!() - } - - fn read_struct(&mut self, _: &str, _: usize, f: F) -> Result - where F: FnOnce(&mut Decoder) -> Result { - f(self) - } - - fn read_struct_field(&mut self, f_name: &str, _: usize, f: F) - -> Result - where F: FnOnce(&mut Decoder) -> Result { - self.push(f_name); - f(self) - } - - fn read_tuple(&mut self, _: usize, _: F) -> Result - where F: FnOnce(&mut Decoder) -> Result { - unimplemented!() - } - - fn read_tuple_arg(&mut self, _: usize, _: F) -> Result - where F: FnOnce(&mut Decoder) -> Result { - unimplemented!() - } - - fn read_tuple_struct(&mut self, _: &str, _: usize, _: F) - -> Result - where F: FnOnce(&mut Decoder) -> Result { - unimplemented!() - } - - fn read_tuple_struct_arg(&mut self, _: usize, _: F) - -> Result - where F: FnOnce(&mut Decoder) -> Result { - unimplemented!() - } - - fn read_option(&mut self, mut f: F) -> Result - where F: FnMut(&mut Decoder, bool) -> Result { - let option = - match self.stack.last() { - None => derr!("Could not decode value into unknown key."), - Some(it) => it.val.as_ref() - .map_or(false, |v| v.as_bool()) - }; - f(self, option) - } - - fn read_seq(&mut self, f: F) -> Result - where F: FnOnce(&mut Decoder, usize) -> Result { - let it = try!(self.pop()); - let list = it.val.unwrap_or(List(vec!())); - let vals = list.as_vec(); - for val in vals.iter().rev() { - self.stack.push(DecoderItem { - key: it.key.clone(), - struct_field: it.struct_field.clone(), - val: Some(Plain(Some((*val).into()))), - }) - } - f(self, vals.len()) - } - - fn read_seq_elt(&mut self, _: usize, f: F) -> Result - where F: FnOnce(&mut Decoder) -> Result { - f(self) - } - - fn read_map(&mut self, _: F) -> Result - where F: FnOnce(&mut Decoder, usize) -> Result { 
- unimplemented!() - } - - fn read_map_elt_key(&mut self, _: usize, _: F) -> Result - where F: FnOnce(&mut Decoder) -> Result { - unimplemented!() - } - - fn read_map_elt_val(&mut self, _: usize, _: F) -> Result - where F: FnOnce(&mut Decoder) -> Result { - unimplemented!() - } -} - -fn to_lowercase>(s: S) -> String { - s.into().chars().map(|c| c.to_lowercase().next().unwrap()).collect() -} diff -Nru cargo-0.17.0/vendor/docopt-0.6.86/src/lib.rs cargo-0.19.0/vendor/docopt-0.6.86/src/lib.rs --- cargo-0.17.0/vendor/docopt-0.6.86/src/lib.rs 2017-03-24 16:59:54.000000000 +0000 +++ cargo-0.19.0/vendor/docopt-0.6.86/src/lib.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,242 +0,0 @@ -//! Docopt for Rust. This implementation conforms to the -//! [official description of Docopt](http://docopt.org/) and -//! [passes its test suite](https://github.com/docopt/docopt/pull/201). -//! -//! This library is [on GitHub](https://github.com/docopt/docopt.rs). -//! -//! Fundamentally, Docopt is a command line argument parser. The detail that -//! distinguishes it from most parsers is that the parser is derived from the -//! usage string. Here's a simple example: -//! -//! ```rust -//! use docopt::Docopt; -//! -//! // Write the Docopt usage string. -//! const USAGE: &'static str = " -//! Usage: cp [-a] -//! cp [-a] ... -//! -//! Options: -//! -a, --archive Copy everything. -//! "; -//! -//! // The argv. Normally you'd just use `parse` which will automatically -//! // use `std::env::args()`. -//! let argv = || vec!["cp", "-a", "file1", "file2", "dest/"]; -//! -//! // Parse argv and exit the program with an error message if it fails. -//! let args = Docopt::new(USAGE) -//! .and_then(|d| d.argv(argv().into_iter()).parse()) -//! .unwrap_or_else(|e| e.exit()); -//! -//! // Now access your argv values. Synonyms work just fine! -//! assert!(args.get_bool("-a") && args.get_bool("--archive")); -//! assert_eq!(args.get_vec(""), vec!["file1", "file2"]); -//! 
assert_eq!(args.get_str(""), "dest/"); -//! assert_eq!(args.get_str(""), ""); -//! ``` -//! -//! # Type based decoding -//! -//! Often, command line values aren't just strings or booleans---sometimes -//! they are integers, or enums, or something more elaborate. Using the -//! standard Docopt interface can be inconvenient for this purpose, because -//! you'll need to convert all of the values explicitly. Instead, this crate -//! provides a `Decoder` that converts an `ArgvMap` to your custom struct. -//! Here is the same example as above using type based decoding: -//! -//! ```rust -//! # extern crate docopt; -//! # extern crate rustc_serialize; -//! # fn main() { -//! use docopt::Docopt; -//! -//! // Write the Docopt usage string. -//! const USAGE: &'static str = " -//! Usage: cp [-a] -//! cp [-a] ... -//! -//! Options: -//! -a, --archive Copy everything. -//! "; -//! -//! #[derive(RustcDecodable)] -//! struct Args { -//! arg_source: Vec, -//! arg_dest: String, -//! arg_dir: String, -//! flag_archive: bool, -//! } -//! -//! let argv = || vec!["cp", "-a", "file1", "file2", "dest/"]; -//! let args: Args = Docopt::new(USAGE) -//! .and_then(|d| d.argv(argv().into_iter()).decode()) -//! .unwrap_or_else(|e| e.exit()); -//! -//! // Now access your argv values. -//! fn s(x: &str) -> String { x.to_string() } -//! assert!(args.flag_archive); -//! assert_eq!(args.arg_source, vec![s("file1"), s("file2")]); -//! assert_eq!(args.arg_dir, s("dest/")); -//! assert_eq!(args.arg_dest, s("")); -//! # } -//! ``` -//! -//! # Command line arguments for `rustc` -//! -//! Here's an example with a subset of `rustc`'s command line arguments that -//! shows more of Docopt and some of the benefits of type based decoding. -//! -//! ```rust -//! # extern crate docopt; -//! # extern crate rustc_serialize; -//! # fn main() { -//! # #![allow(non_snake_case)] -//! use docopt::Docopt; -//! -//! // Write the Docopt usage string. -//! const USAGE: &'static str = " -//! 
Usage: rustc [options] [--cfg SPEC... -L PATH...] INPUT -//! rustc (--help | --version) -//! -//! Options: -//! -h, --help Show this message. -//! --version Show the version of rustc. -//! --cfg SPEC Configure the compilation environment. -//! -L PATH Add a directory to the library search path. -//! --emit TYPE Configure the output that rustc will produce. -//! Valid values: asm, ir, bc, obj, link. -//! --opt-level LEVEL Optimize with possible levels 0-3. -//! "; -//! -//! #[derive(RustcDecodable)] -//! struct Args { -//! arg_INPUT: String, -//! flag_emit: Option, -//! flag_opt_level: Option, -//! flag_cfg: Vec, -//! flag_L: Vec, -//! flag_help: bool, -//! flag_version: bool, -//! } -//! -//! // This is easy. The decoder will automatically restrict values to -//! // strings that match one of the enum variants. -//! #[derive(RustcDecodable)] -//! # #[derive(Debug, PartialEq)] -//! enum Emit { Asm, Ir, Bc, Obj, Link } -//! -//! // This one is harder because we want the user to specify an integer, -//! // but restrict it to a specific range. So we implement `Decodable` -//! // ourselves. -//! # #[derive(Debug, PartialEq)] -//! enum OptLevel { Zero, One, Two, Three } -//! -//! impl rustc_serialize::Decodable for OptLevel { -//! fn decode(d: &mut D) -//! -> Result { -//! Ok(match try!(d.read_usize()) { -//! 0 => OptLevel::Zero, 1 => OptLevel::One, -//! 2 => OptLevel::Two, 3 => OptLevel::Three, -//! n => { -//! let err = format!( -//! "Could not decode '{}' as opt-level.", n); -//! return Err(d.error(&*err)); -//! } -//! }) -//! } -//! } -//! -//! let argv = || vec!["rustc", "-L", ".", "-L", "..", "--cfg", "a", -//! "--opt-level", "2", "--emit=ir", "docopt.rs"]; -//! let args: Args = Docopt::new(USAGE) -//! .and_then(|d| d.argv(argv().into_iter()).decode()) -//! .unwrap_or_else(|e| e.exit()); -//! -//! // Now access your argv values. -//! fn s(x: &str) -> String { x.to_string() } -//! assert_eq!(args.arg_INPUT, "docopt.rs".to_string()); -//! 
assert_eq!(args.flag_L, vec![s("."), s("..")]); -//! assert_eq!(args.flag_cfg, vec![s("a")]); -//! assert_eq!(args.flag_opt_level, Some(OptLevel::Two)); -//! assert_eq!(args.flag_emit, Some(Emit::Ir)); -//! # } -//! ``` -//! -//! # The `docopt!` macro -//! -//! This package comes bundled with an additional crate, `docopt_macros`, -//! which provides a `docopt!` syntax extension. Its purpose is to automate -//! the creation of a Rust struct from a Docopt usage string. In particular, -//! this provides a single point of truth about the definition of command line -//! arguments in your program. -//! -//! Another advantage of using the macro is that errors in your Docopt usage -//! string will be caught at compile time. Stated differently, your program -//! will not compile with an invalid Docopt usage string. -//! -//! The example above using type based decoding can be simplified to this: -//! -//! ```ignore -//! #![feature(plugin)] -//! #![plugin(docopt_macros)] -//! -//! extern crate rustc_serialize; -//! -//! extern crate docopt; -//! -//! // Write the Docopt usage string with the `docopt!` macro. -//! docopt!(Args, " -//! Usage: cp [-a] -//! cp [-a] ... -//! -//! Options: -//! -a, --archive Copy everything. -//! ") -//! -//! fn main() { -//! let argv = || vec!["cp", "-a", "file1", "file2", "dest/"]; -//! -//! // Your `Args` struct has a single static method defined on it, -//! // `docopt`, which will return a normal `Docopt` value. -//! let args: Args = Args::docopt().decode().unwrap_or_else(|e| e.exit()); -//! -//! // Now access your argv values. -//! fn s(x: &str) -> String { x.to_string() } -//! assert!(args.flag_archive); -//! assert_eq!(args.arg_source, vec![s("file1"), s("file2")]); -//! assert_eq!(args.arg_dir, s("dest/")); -//! assert_eq!(args.arg_dest, s("")); -//! } -//! 
``` - -#![crate_name = "docopt"] -#![doc(html_root_url = "http://burntsushi.net/rustdoc/docopt")] - -#![deny(missing_docs)] - -#[macro_use] -extern crate lazy_static; -extern crate regex; -extern crate rustc_serialize; -extern crate strsim; - -pub use dopt::{ArgvMap, Decoder, Docopt, Error, Value}; - -macro_rules! werr( - ($($arg:tt)*) => ({ - use std::io::{Write, stderr}; - write!(&mut stderr(), $($arg)*).unwrap(); - }) -); - -macro_rules! regex( - ($s:expr) => (::regex::Regex::new($s).unwrap()); -); - -mod dopt; -#[doc(hidden)] -pub mod parse; -mod synonym; -#[cfg(test)] -mod test; diff -Nru cargo-0.17.0/vendor/docopt-0.6.86/src/parse.rs cargo-0.19.0/vendor/docopt-0.6.86/src/parse.rs --- cargo-0.17.0/vendor/docopt-0.6.86/src/parse.rs 2017-03-24 16:59:54.000000000 +0000 +++ cargo-0.19.0/vendor/docopt-0.6.86/src/parse.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,1490 +0,0 @@ -// I am overall pretty displeased with the quality of code in this module. -// I wrote it while simultaneously trying to build a mental model of Docopt's -// specification (hint: one does not exist in written form). As a result, there -// is a lot of coupling and some duplication. -// -// Some things that I think are good about the following code: -// -// - The representation of a "usage pattern." I think it is a minimal -// representation of a pattern's syntax. (One possible tweak: -// `Optional>` -> `Optional>`.) -// - Some disciplined use of regexes. I use a pretty basic state machine -// for parsing patterns, but for teasing out the patterns and options -// from the Docopt string and for picking out flags with arguments, I -// think regexes aren't too bad. There may be one or two scary ones though. -// - The core matching algorithm is reasonably simple and concise, but I -// think writing down some contracts will help me figure out how to make -// the code clearer. -// -// Some bad things: -// -// - I tried several times to split some of the pieces in this module into -// separate modules. 
I could find no clear separation. This suggests that -// there is too much coupling between parsing components. I'm not convinced -// that the coupling is necessary. -// - The parsers for patterns and argv share some structure. There may be -// an easy abstraction waiting there. -// - It is not efficient in the slightest. I tried to be conservative with -// copying strings, but I think I failed. (It may not be worthwhile to fix -// this if it makes the code more awkward. Docopt does not need to be -// efficient.) -// -// Some things to do immediately: -// -// - Document representation and invariants. -// - Less important: write contracts for functions. -// -// Long term: -// -// - Write a specification for Docopt. - -pub use self::Argument::{Zero, One}; -pub use self::Atom::{Short, Long, Command, Positional}; -use self::Pattern::{Alternates, Sequence, Optional, Repeat, PatAtom}; - -use std::borrow::ToOwned; -use std::collections::{HashMap, HashSet}; -use std::collections::hash_map::Entry::{Vacant, Occupied}; -use std::cmp::Ordering; -use std::fmt; -use regex; -use regex::Regex; -use strsim::levenshtein; - -use dopt::Value::{self, Switch, Counted, Plain, List}; -use synonym::SynonymMap; - -macro_rules! err( - ($($arg:tt)*) => (return Err(format!($($arg)*))) -); - -#[derive(Clone)] -pub struct Parser { - pub program: String, - pub full_doc: String, - pub usage: String, - pub descs: SynonymMap, - usages: Vec, - last_atom_added: Option, // context for [default: ...] 
-} - -impl Parser { - pub fn new(doc: &str) -> Result { - let mut d = Parser { - program: String::new(), - full_doc: doc.into(), - usage: String::new(), - usages: vec!(), - descs: SynonymMap::new(), - last_atom_added: None, - }; - try!(d.parse(doc)); - Ok(d) - } - - pub fn matches(&self, argv: &Argv) -> Option> { - for usage in &self.usages { - match Matcher::matches(argv, usage) { - None => continue, - Some(vals) => return Some(vals), - } - } - None - } - - pub fn parse_argv(&self, argv: Vec, options_first: bool) - -> Result { - Argv::new(self, argv, options_first) - } -} - -impl Parser { - fn options_atoms(&self) -> Vec { - let mut atoms = vec!(); - for (atom, _) in self.descs.iter().filter(|&(_, opts)| opts.is_desc) { - atoms.push(atom.clone()); - } - atoms - } - - fn has_arg(&self, atom: &Atom) -> bool { - match self.descs.find(atom) { - None => false, - Some(opts) => opts.arg.has_arg(), - } - } - - fn has_repeat(&self, atom: &Atom) -> bool { - match self.descs.find(atom) { - None => false, - Some(opts) => opts.repeats, - } - } - - fn parse(&mut self, doc: &str) -> Result<(), String> { - lazy_static! { - static ref MUSAGE: Regex = Regex::new( - r"(?s)(?i:usage):\s*(?P\S+)(?P.*?)(?:$|\n\s*\n)" - ).unwrap(); - } - let caps = match MUSAGE.captures(doc) { - None => err!("Could not find usage patterns in doc string."), - Some(caps) => caps, - }; - if caps.name("prog").unwrap_or("").is_empty() { - err!("Could not find program name in doc string.") - } - self.program = caps.name("prog").unwrap_or("").into(); - self.usage = caps.at(0).unwrap_or("").into(); - - // Before we parse the usage patterns, we look for option descriptions. - // We do this because the information in option descriptions can be - // used to resolve ambiguities in usage patterns (i.e., whether - // `--flag ARG` is a flag with an argument or not). - // - // From the docopt page, "every" line starting with a `-` or a `--` - // is considered an option description. 
Instead, we restrict the lines - // to any line *not* in the usage pattern section. - // - // *sigh* Apparently the above is not true. The official test suite - // includes `Options: -a ...`, which means some lines not beginning - // with `-` can actually have options. - let (pstart, pend) = caps.pos(0).unwrap(); - let (before, after) = (&doc[..pstart], &doc[pend..]); - // We process every line here (instead of restricting to lines starting - // with "-") because we need to check every line for a default value. - // The default value always belongs to the most recently defined desc. - for line in before.lines().chain(after.lines()) { - try!(self.parse_desc(line)); - } - - let mprog = format!( - "^(?:{})?\\s*(.*?)\\s*$", - regex::quote(caps.name("prog").unwrap_or(""))); - let pats = Regex::new(&*mprog).unwrap(); - - if caps.name("pats").unwrap_or("") == "" { - let pattern = try!(PatParser::new(self, "").parse()); - self.usages.push(pattern); - } else { - for line in caps.name("pats").unwrap_or("").lines() { - for pat in pats.captures_iter(line.trim()) { - let pattern = try!( - PatParser::new(self, pat.at(1).unwrap_or("")).parse()); - self.usages.push(pattern); - } - } - } - Ok(()) - } - - fn parse_desc(&mut self, full_desc: &str) -> Result<(), String> { - lazy_static! { - static ref OPTIONS: Regex = regex!(r"^\s*(?i:options:)\s*"); - static ref ISFLAG: Regex = regex!(r"^(-\S|--\S)"); - static ref REMOVE_DESC: Regex = regex!(r" .*$"); - static ref NORMALIZE_FLAGS: Regex = regex!(r"([^-\s]), -"); - static ref FIND_FLAGS: Regex = regex!(r"(?x) - (?:(?P--[^\x20\t=]+)|(?P-[^\x20\t=]+)) - (?:(?:\x20|=)(?P[^.-]\S*))? - (?P\x20\.\.\.)? - "); - } - let desc = OPTIONS.replace(full_desc.trim(), ""); - let desc = &*desc; - if !ISFLAG.is_match(desc) { - try!(self.parse_default(full_desc)); - return Ok(()) - } - - // Get rid of the description, which must be at least two spaces - // after the flag or argument. 
- let desc = REMOVE_DESC.replace(desc, ""); - // Normalize `-x, --xyz` to `-x --xyz`. - let desc = NORMALIZE_FLAGS.replace(&desc, "$1 -"); - let desc = desc.trim(); - - let (mut short, mut long) = <(String, String)>::default(); - let mut has_arg = false; - let mut last_end = 0; - let mut repeated = false; - for flags in FIND_FLAGS.captures_iter(desc) { - last_end = flags.pos(0).unwrap().1; - if !flags.name("repeated").unwrap_or("").is_empty() { - // If the "repeated" subcapture is not empty, then we have - // a valid repeated option. - repeated = true; - } - let (s, l) = ( - flags.name("short").unwrap_or(""), - flags.name("long").unwrap_or(""), - ); - if !s.is_empty() { - if !short.is_empty() { - err!("Only one short flag is allowed in an option \ - description, but found '{}' and '{}'.", short, s) - } - short = s.into() - } - if !l.is_empty() { - if !long.is_empty() { - err!("Only one long flag is allowed in an option \ - description, but found '{}' and '{}'.", long, l) - } - long = l.into() - } - if let Some(arg) = flags.name("arg") { - if !arg.is_empty() { - if !Atom::is_arg(arg) { - err!("Argument '{}' is not of the form ARG or .", - arg) - } - has_arg = true; // may be changed to default later - } - } - } - // Make sure that we consumed everything. If there are leftovers, - // then there is some malformed description. Alert the user. - assert!(last_end <= desc.len()); - if last_end < desc.len() { - err!("Extraneous text '{}' in option description '{}'.", - &desc[last_end..], desc) - } - try!(self.add_desc(&short, &long, has_arg, repeated)); - // Looking for default in this line must come after adding the - // description, otherwise `parse_default` won't know which option - // to assign it to. - self.parse_default(full_desc) - } - - fn parse_default(&mut self, desc: &str) -> Result<(), String> { - lazy_static! 
{ - static ref FIND_DEFAULT: Regex = regex!( - r"\[(?i:default):(?P.*)\]" - ); - } - let defval = - match FIND_DEFAULT.captures(desc) { - None => return Ok(()), - Some(c) => c.name("val").unwrap_or("").trim(), - }; - let last_atom = - match self.last_atom_added { - None => err!("Found default value '{}' in '{}' before first \ - option description.", defval, desc), - Some(ref atom) => atom, - }; - let opts = - self.descs - .find_mut(last_atom) - .expect(&*format!("BUG: last opt desc key ('{:?}') is invalid.", - last_atom)); - match opts.arg { - One(None) => {}, // OK - Zero => - err!("Cannot assign default value '{}' to flag '{}' \ - that has no arguments.", defval, last_atom), - One(Some(ref curval)) => - err!("Flag '{}' already has a default value \ - of '{}' (second default value: '{}').", - last_atom, curval, defval), - } - opts.arg = One(Some(defval.into())); - Ok(()) - } - - fn add_desc( - &mut self, - short: &str, - long: &str, - has_arg: bool, - repeated: bool, - ) -> Result<(), String> { - assert!(!short.is_empty() || !long.is_empty()); - if !short.is_empty() && short.chars().count() != 2 { - // It looks like the reference implementation just ignores - // these lines. 
- return Ok(()); - } - let mut opts = Options::new( - repeated, if has_arg { One(None) } else { Zero }); - opts.is_desc = true; - - if !short.is_empty() && !long.is_empty() { - let (short, long) = (Atom::new(short), Atom::new(long)); - self.descs.insert(long.clone(), opts); - self.descs.insert_synonym(short, long.clone()); - self.last_atom_added = Some(long); - } else if !short.is_empty() { - let short = Atom::new(short); - self.descs.insert(short.clone(), opts); - self.last_atom_added = Some(short); - } else if !long.is_empty() { - let long = Atom::new(long); - self.descs.insert(long.clone(), opts); - self.last_atom_added = Some(long); - } - Ok(()) - } -} - -impl fmt::Debug for Parser { - fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { - fn sorted(mut xs: Vec) -> Vec { - xs.sort(); xs - } - - try!(writeln!(f, "=====")); - try!(writeln!(f, "Program: {}", self.program)); - - try!(writeln!(f, "Option descriptions:")); - let keys = sorted(self.descs.keys().collect()); - for &k in &keys { - try!(writeln!(f, " '{}' => {:?}", k, self.descs.get(k))); - } - - try!(writeln!(f, "Synonyms:")); - let keys: Vec<(&Atom, &Atom)> = - sorted(self.descs.synonyms().collect()); - for &(from, to) in &keys { - try!(writeln!(f, " {:?} => {:?}", from, to)); - } - - try!(writeln!(f, "Usages:")); - for pat in &self.usages { - try!(writeln!(f, " {:?}", pat)); - } - writeln!(f, "=====") - } -} - -struct PatParser<'a> { - dopt: &'a mut Parser, - tokens: Vec, // used while parsing a single usage pattern - curi: usize, // ^^ index into pattern chars - expecting: Vec, // stack of expected ']' or ')' -} - -impl<'a> PatParser<'a> { - fn new(dopt: &'a mut Parser, pat: &str) -> PatParser<'a> { - PatParser { - dopt: dopt, - tokens: pattern_tokens(pat), - curi: 0, - expecting: vec!(), - } - } - - fn parse(&mut self) -> Result { - // let mut seen = HashSet::new(); - let mut p = try!(self.pattern()); - match self.expecting.pop() { - None => {}, - Some(c) => err!("Unclosed group. 
Expected '{}'.", c), - } - p.add_options_shortcut(self.dopt); - p.tag_repeats(&mut self.dopt.descs); - Ok(p) - } - - fn pattern(&mut self) -> Result { - let mut alts = vec!(); - let mut seq = vec!(); - while !self.is_eof() { - match self.cur() { - "..." => { - err!("'...' must appear directly after a group, argument, \ - flag or command.") - } - "-" | "--" => { - // As per specification, `-` and `--` by themselves are - // just commands that should be interpreted conventionally. - seq.push(try!(self.command())); - } - "|" => { - if seq.is_empty() { - err!("Unexpected '|'. Not in form 'a | b | c'.") - } - try!(self.next_noeof("pattern")); - alts.push(Sequence(seq)); - seq = vec!(); - } - "]" | ")" => { - if seq.is_empty() { - err!("Unexpected '{}'. Empty groups are not allowed.", - self.cur()) - } - match self.expecting.pop() { - None => err!("Unexpected '{}'. No open bracket found.", - self.cur()), - Some(c) => { - if c != self.cur().chars().next().unwrap() { - err!("Expected '{}' but got '{}'.", - c, self.cur()) - } - } - } - let mk: fn(Vec) -> Pattern = - if self.cur() == "]" { Optional } else { Sequence }; - self.next(); - return - if alts.is_empty() { - Ok(mk(seq)) - } else { - alts.push(Sequence(seq)); - Ok(mk(vec!(Alternates(alts)))) - } - } - "[" => { - // Check for special '[options]' shortcut. - if self.atis(1, "options") && self.atis(2, "]") { - self.next(); // cur == options - self.next(); // cur == ] - self.next(); - seq.push(self.maybe_repeat(Optional(vec!()))); - continue - } - self.expecting.push(']'); - seq.push(try!(self.group())); - } - "(" => { - self.expecting.push(')'); - seq.push(try!(self.group())); - } - _ => { - if Atom::is_short(self.cur()) { - seq.extend(try!(self.flag_short()).into_iter()); - } else if Atom::is_long(self.cur()) { - seq.push(try!(self.flag_long())); - } else if Atom::is_arg(self.cur()) { - // These are always positional. - // Arguments for -s and --short are picked up - // when parsing flags. 
- seq.push(try!(self.positional())); - } else if Atom::is_cmd(self.cur()) { - seq.push(try!(self.command())); - } else { - err!("Unknown token type '{}'.", self.cur()) - } - } - } - } - if alts.is_empty() { - Ok(Sequence(seq)) - } else { - alts.push(Sequence(seq)); - Ok(Alternates(alts)) - } - } - - fn flag_short(&mut self) -> Result, String> { - let mut seq = vec!(); - let stacked: String = self.cur()[1..].into(); - for (i, c) in stacked.chars().enumerate() { - let atom = self.dopt.descs.resolve(&Short(c)); - let mut pat = PatAtom(atom.clone()); - if self.dopt.has_repeat(&atom) { - pat = Pattern::repeat(pat); - } - seq.push(pat); - - // The only way for a short option to have an argument is if - // it's specified in an option description. - if !self.dopt.has_arg(&atom) { - self.add_atom_ifnotexists(Zero, &atom); - } else { - // At this point, the flag MUST have an argument. Therefore, - // we interpret the "rest" of the characters as the argument. - // If the "rest" is empty, then we peek to find and make sure - // there is an argument. - let rest = &stacked[i+1..]; - if rest.is_empty() { - try!(self.next_flag_arg(&atom)); - } else { - try!(self.errif_invalid_flag_arg(&atom, rest)); - } - // We either error'd or consumed the rest of the short stack as - // an argument. - break - } - } - self.next(); - // This is a little weird. We've got to manually look for a repeat - // operator right after the stack, and then apply it to each short - // flag we generated. - // If "sequences" never altered semantics, then we could just use that - // here to group a short stack. - if self.atis(0, "...") { - self.next(); - seq = seq.into_iter().map(Pattern::repeat).collect(); - } - Ok(seq) - } - - fn flag_long(&mut self) -> Result { - let (atom, arg) = try!(parse_long_equal(self.cur())); - let atom = self.dopt.descs.resolve(&atom); - if self.dopt.descs.contains_key(&atom) { - // Options already exist for this atom, so we must check to make - // sure things are consistent. 
- let has_arg = self.dopt.has_arg(&atom); - if arg.has_arg() && !has_arg { - // Found `=` in usage, but previous usage of this flag - // didn't specify an argument. - err!("Flag '{}' does not take any arguments.", atom) - } else if !arg.has_arg() && has_arg { - // Didn't find any `=` in usage for this flag, but previous - // usage of this flag specifies an argument. - // So look for `--flag ARG` - try!(self.next_flag_arg(&atom)); - // We don't care about the value of `arg` since options - // already exist. (In which case, the argument value can never - // change.) - } - } - self.add_atom_ifnotexists(arg, &atom); - self.next(); - let pat = if self.dopt.has_repeat(&atom) { - Pattern::repeat(PatAtom(atom)) - } else { - PatAtom(atom) - }; - Ok(self.maybe_repeat(pat)) - } - - fn next_flag_arg(&mut self, atom: &Atom) -> Result<(), String> { - try!(self.next_noeof(&*format!("argument for flag '{}'", atom))); - self.errif_invalid_flag_arg(atom, self.cur()) - } - - fn errif_invalid_flag_arg(&self, atom: &Atom, arg: &str) - -> Result<(), String> { - if !Atom::is_arg(arg) { - err!("Expected argument for flag '{}', but found \ - malformed argument '{}'.", atom, arg) - } - Ok(()) - } - - fn command(&mut self) -> Result { - let atom = Atom::new(self.cur()); - self.add_atom_ifnotexists(Zero, &atom); - self.next(); - Ok(self.maybe_repeat(PatAtom(atom))) - } - - fn positional(&mut self) -> Result { - let atom = Atom::new(self.cur()); - self.add_atom_ifnotexists(Zero, &atom); - self.next(); - Ok(self.maybe_repeat(PatAtom(atom))) - } - - fn add_atom_ifnotexists(&mut self, arg: Argument, atom: &Atom) { - if !self.dopt.descs.contains_key(atom) { - let opts = Options::new(false, arg); - self.dopt.descs.insert(atom.clone(), opts); - } - } - - fn group(&mut self) - -> Result { - try!(self.next_noeof("pattern")); - let pat = try!(self.pattern()); - Ok(self.maybe_repeat(pat)) - } - - fn maybe_repeat(&mut self, pat: Pattern) -> Pattern { - if self.atis(0, "...") { - self.next(); - 
Pattern::repeat(pat) - } else { - pat - } - } - - fn is_eof(&self) -> bool { - self.curi == self.tokens.len() - } - fn next(&mut self) { - if self.curi == self.tokens.len() { - return - } - self.curi += 1; - } - fn next_noeof(&mut self, expected: &str) -> Result<(), String> { - self.next(); - if self.curi == self.tokens.len() { - err!("Expected {} but reached end of usage pattern.", expected) - } - Ok(()) - } - fn cur(&self) -> &str { - &*self.tokens[self.curi] - } - fn atis(&self, offset: usize, is: &str) -> bool { - let i = self.curi + offset; - i < self.tokens.len() && self.tokens[i] == is - } -} - -#[derive(Clone, Debug)] -enum Pattern { - Alternates(Vec), - Sequence(Vec), - Optional(Vec), - Repeat(Box), - PatAtom(Atom), -} - -#[derive(PartialEq, Eq, Ord, Hash, Clone, Debug)] -pub enum Atom { - Short(char), - Long(String), - Command(String), - Positional(String), -} - -#[derive(Clone, Debug)] -pub struct Options { - /// Set to true if this atom is ever repeated in any context. - /// For positional arguments, non-argument flags and commands, repetition - /// means that they become countable. - /// For flags with arguments, repetition means multiple distinct values - /// can be specified (and are represented as a Vec). - pub repeats: bool, - - /// This specifies whether this atom has any arguments. - /// For commands and positional arguments, this is always Zero. - /// Flags can have zero or one argument, with an optionally default value. - pub arg: Argument, - - /// Whether it shows up in the "options description" second. 
- pub is_desc: bool, -} - -#[derive(Clone, Debug, PartialEq)] -pub enum Argument { - Zero, - One(Option), // optional default value -} - -impl Pattern { - fn add_options_shortcut(&mut self, par: &Parser) { - fn add(pat: &mut Pattern, all_atoms: &HashSet, par: &Parser) { - match *pat { - Alternates(ref mut ps) | Sequence(ref mut ps) => { - for p in ps.iter_mut() { add(p, all_atoms, par) } - } - Repeat(ref mut p) => add(&mut **p, all_atoms, par), - PatAtom(_) => {} - Optional(ref mut ps) => { - if !ps.is_empty() { - for p in ps.iter_mut() { add(p, all_atoms, par) } - } else { - for atom in par.options_atoms().into_iter() { - if !all_atoms.contains(&atom) { - if par.has_repeat(&atom) { - ps.push(Pattern::repeat(PatAtom(atom))); - } else { - ps.push(PatAtom(atom)); - } - } - } - } - } - } - } - let all_atoms = self.all_atoms(); - add(self, &all_atoms, par); - } - - fn all_atoms(&self) -> HashSet { - fn all_atoms(pat: &Pattern, set: &mut HashSet) { - match *pat { - Alternates(ref ps) | Sequence(ref ps) | Optional(ref ps) => { - for p in ps.iter() { all_atoms(p, set) } - } - Repeat(ref p) => all_atoms(&**p, set), - PatAtom(ref a) => { set.insert(a.clone()); } - } - } - let mut set = HashSet::new(); - all_atoms(self, &mut set); - set - } - - fn tag_repeats(&self, map: &mut SynonymMap) { - fn dotag(p: &Pattern, - rep: bool, - map: &mut SynonymMap, - seen: &mut HashSet) { - match *p { - Alternates(ref ps) => { - // This is a bit tricky. Basically, we don't want the - // existence of an item in mutually exclusive alternations - // to affect whether it repeats or not. - // However, we still need to record seeing each item in - // each alternation. 
- let fresh = seen.clone(); - for p in ps.iter() { - let mut isolated = fresh.clone(); - dotag(p, rep, map, &mut isolated); - for a in isolated.into_iter() { - seen.insert(a); - } - } - } - Sequence(ref ps) => { - for p in ps.iter() { - dotag(p, rep, map, seen) - } - } - Optional(ref ps) => { - for p in ps.iter() { - dotag(p, rep, map, seen) - } - } - Repeat(ref p) => dotag(&**p, true, map, seen), - PatAtom(ref atom) => { - let opt = map.find_mut(atom).expect("bug: no atom found"); - opt.repeats = opt.repeats || rep || seen.contains(atom); - seen.insert(atom.clone()); - } - } - } - let mut seen = HashSet::new(); - dotag(self, false, map, &mut seen); - } - - fn repeat(p: Pattern) -> Pattern { - match p { - // Normalize [p1 p2]... into the equivalent [p1... p2...]. - Optional(ps) => Optional(ps.into_iter().map(Pattern::repeat).collect()), - p @ Repeat(_) => p, - p => Repeat(Box::new(p)), - } - } -} - -impl Atom { - pub fn new(s: &str) -> Atom { - if Atom::is_short(s) { - Short(s[1..].chars().next().unwrap()) - } else if Atom::is_long(s) { - Long(s[2..].into()) - } else if Atom::is_arg(s) { - if s.starts_with("<") && s.ends_with(">") { - Positional(s[1..s.len()-1].into()) - } else { - Positional(s.into()) - } - } else if Atom::is_cmd(s) { - Command(s.into()) - } else { - panic!("Unknown atom string: '{}'", s) - } - } - - fn is_short(s: &str) -> bool { - lazy_static! { - static ref RE: Regex = regex!(r"^-[^-]\S*$"); - } - RE.is_match(s) - } - - fn is_long(s: &str) -> bool { - lazy_static! { - static ref RE: Regex = regex!(r"^--\S+(?:<[^>]+>)?$"); - } - RE.is_match(s) - } - - fn is_long_argv(s: &str) -> bool { - lazy_static! { - static ref RE: Regex = regex!(r"^--\S+(=.+)?$"); - } - RE.is_match(s) - } - - fn is_arg(s: &str) -> bool { - lazy_static! { - static ref RE: Regex = regex!(r"^(\p{Lu}+|<[^>]+>)$"); - } - RE.is_match(s) - } - - fn is_cmd(s: &str) -> bool { - lazy_static! 
{ - static ref RE: Regex = regex!(r"^(-|--|[^-]\S*)$"); - } - RE.is_match(s) - } - - // Assigns an integer to each variant of Atom. (For easier sorting.) - fn type_as_usize(&self) -> usize { - match *self { - Short(_) => 0, - Long(_) => 1, - Command(_) => 2, - Positional(_) => 3, - } - } -} - -impl PartialOrd for Atom { - fn partial_cmp(&self, other: &Atom) -> Option { - match (self, other) { - (&Short(c1), &Short(c2)) => c1.partial_cmp(&c2), - (&Long(ref s1), &Long(ref s2)) => s1.partial_cmp(s2), - (&Command(ref s1), &Command(ref s2)) => s1.partial_cmp(s2), - (&Positional(ref s1), &Positional(ref s2)) => s1.partial_cmp(s2), - (a1, a2) => a1.type_as_usize().partial_cmp(&a2.type_as_usize()), - } - } -} - -impl fmt::Display for Atom { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match *self { - Short(c) => write!(f, "-{}", c), - Long(ref s) => write!(f, "--{}", s), - Command(ref s) => write!(f, "{}", s), - Positional(ref s) => { - if s.chars().all(|c| c.is_uppercase()) { - write!(f, "{}", s) - } else { - write!(f, "<{}>", s) - } - } - } - } -} - - -impl Options { - fn new(rep: bool, arg: Argument) -> Options { - Options { repeats: rep, arg: arg, is_desc: false, } - } -} - -impl Argument { - fn has_arg(&self) -> bool { - match *self { - Zero => false, - One(_) => true, - } - } -} - -#[doc(hidden)] -pub struct Argv<'a> { - /// A representation of an argv string as an ordered list of tokens. - /// This contains only positional arguments and commands. - positional: Vec, - /// Same as positional, but contains short and long flags. - /// Each flag may have an argument string. - flags: Vec, - /// Counts the number of times each flag appears. - counts: HashMap, - - // State for parser. 
- dopt: &'a Parser, - argv: Vec, - curi: usize, - options_first: bool, -} - -#[derive(Clone, Debug)] -struct ArgvToken { - atom: Atom, - arg: Option, -} - -impl<'a> Argv<'a> { - fn new(dopt: &'a Parser, argv: Vec, options_first: bool) - -> Result, String> { - let mut a = Argv { - positional: vec!(), - flags: vec!(), - counts: HashMap::new(), - dopt: dopt, - argv: argv.iter().cloned().collect(), - curi: 0, - options_first: options_first, - }; - try!(a.parse()); - for flag in &a.flags { - match a.counts.entry(flag.atom.clone()) { - Vacant(v) => { v.insert(1); } - Occupied(mut v) => { *v.get_mut() += 1; } - } - } - Ok(a) - } - - fn parse(&mut self) -> Result<(), String> { - let mut seen_double_dash = false; - while self.curi < self.argv.len() { - let do_flags = - !seen_double_dash - && (!self.options_first || self.positional.is_empty()); - - if do_flags && Atom::is_short(self.cur()) { - let stacked: String = self.cur()[1..].into(); - for (i, c) in stacked.chars().enumerate() { - let mut tok = ArgvToken { - atom: self.dopt.descs.resolve(&Short(c)), - arg: None, - }; - if !self.dopt.descs.contains_key(&tok.atom) { - err!("Unknown flag: '{}'", &tok.atom); - } - if !self.dopt.has_arg(&tok.atom) { - self.flags.push(tok); - } else { - let rest = &stacked[i+1..]; - tok.arg = Some( - if rest.is_empty() { - let arg = try!(self.next_arg(&tok.atom)); - arg.into() - } else { - rest.into() - } - ); - self.flags.push(tok); - // We've either produced an error or gobbled up the - // rest of these stacked short flags, so stop. 
- break - } - } - } else if do_flags && Atom::is_long_argv(self.cur()) { - let (atom, mut arg) = parse_long_equal_argv(self.cur()); - let atom = self.dopt.descs.resolve(&atom); - if !self.dopt.descs.contains_key(&atom) { - return self.err_unknown_flag(&atom) - } - if arg.is_some() && !self.dopt.has_arg(&atom) { - err!("Flag '{}' cannot have an argument, but found '{}'.", - &atom, arg.as_ref().unwrap()) - } else if arg.is_none() && self.dopt.has_arg(&atom) { - try!(self.next_noeof(&*format!("argument for flag '{}'", - &atom))); - arg = Some(self.cur().into()); - } - self.flags.push(ArgvToken { atom: atom, arg: arg }); - } else { - if !seen_double_dash && self.cur() == "--" { - seen_double_dash = true; - } else { - // Yup, we *always* insert a positional argument, which - // means we completely neglect `Command` here. - // This is because we can't tell whether something is a - // `command` or not until we start pattern matching. - let tok = ArgvToken { - atom: Positional(self.cur().into()), - arg: None, - }; - self.positional.push(tok); - } - } - self.next() - } - Ok(()) - } - - fn err_unknown_flag(&self, atom: &Atom) -> Result<(), String> { - use std::usize::MAX; - let mut best = String::new(); - let flag = atom.to_string(); - let mut min = MAX; - - let mut possibles = Vec::new(); - - for (key, _) in self.dopt.descs.synonyms() { - possibles.push(key); - } - - for key in self.dopt.descs.keys() { - possibles.push(key); - } - - for key in &possibles { - match **key { - Long(_) | Command(_) => { - let name = key.to_string(); - let dist = levenshtein(&flag, &name); - if dist < 3 && dist < min { - min = dist; - best = name; - } - } - _ => {} - } - } - if best.is_empty() { - err!("Unknown flag: '{}'", &atom); - } else { - err!("Unknown flag: '{}'. 
Did you mean '{}'?", &atom, &best) - } - } - - fn cur(&self) -> &str { self.at(0) } - fn at(&self, i: usize) -> &str { - &*self.argv[self.curi + i] - } - fn next(&mut self) { - if self.curi < self.argv.len() { - self.curi += 1 - } - } - fn next_arg(&mut self, atom: &Atom) -> Result<&str, String> { - let expected = format!("argument for flag '{}'", atom); - try!(self.next_noeof(&*expected)); - Ok(self.cur()) - } - fn next_noeof(&mut self, expected: &str) -> Result<(), String> { - self.next(); - if self.curi == self.argv.len() { - err!("Expected {} but reached end of arguments.", expected) - } - Ok(()) - } -} - -impl<'a> fmt::Debug for Argv<'a> { - fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { - try!(writeln!(f, "Positional: {:?}", self.positional)); - try!(writeln!(f, "Flags: {:?}", self.flags)); - try!(writeln!(f, "Counts: {:?}", self.counts)); - Ok(()) - } -} - -struct Matcher<'a, 'b:'a> { - argv: &'a Argv<'b>, -} - -#[derive(Clone, Debug, PartialEq)] -struct MState { - argvi: usize, // index into Argv.positional - counts: HashMap, // flags remaining for pattern consumption - max_counts: HashMap, // optional flag appearances - vals: HashMap, -} - -impl MState { - fn fill_value(&mut self, key: Atom, rep: bool, arg: Option) - -> bool { - match (arg, rep) { - (None, false) => { - self.vals.insert(key, Switch(true)); - } - (Some(arg), false) => { - self.vals.insert(key, Plain(Some(arg))); - } - (None, true) => { - match self.vals.entry(key) { - Vacant(v) => { v.insert(Counted(1)); } - Occupied(mut v) => { - match *v.get_mut() { - Counted(ref mut c) => { *c += 1; } - _ => return false, - } - } - } - } - (Some(arg), true) => { - match self.vals.entry(key) { - Vacant(v) => { v.insert(List(vec!(arg))); } - Occupied(mut v) => { - match *v.get_mut() { - List(ref mut vs) => vs.push(arg), - _ => return false, - } - } - } - } - } - true - } - - fn add_value(&mut self, opts: &Options, - spec: &Atom, atom: &Atom, arg: &Option) -> bool { - 
assert!(opts.arg.has_arg() == arg.is_some(), - "'{:?}' should have an argument but doesn't", atom); - match *atom { - Short(_) | Long(_) => { - self.fill_value(spec.clone(), opts.repeats, arg.clone()) - } - Positional(ref v) => { - assert!(!opts.arg.has_arg()); - self.fill_value(spec.clone(), opts.repeats, Some(v.clone())) - } - Command(_) => { - assert!(!opts.arg.has_arg()); - self.fill_value(spec.clone(), opts.repeats, None) - } - } - } - - fn use_flag(&mut self, flag: &Atom) -> bool { - match self.max_counts.entry(flag.clone()) { - Vacant(v) => { v.insert(0); } - Occupied(_) => {} - } - match self.counts.entry(flag.clone()) { - Vacant(_) => { false } - Occupied(mut v) => { - let c = v.get_mut(); - if *c == 0 { - false - } else { - *c -= 1; - true - } - } - } - } - - fn use_optional_flag(&mut self, flag: &Atom) { - match self.max_counts.entry(flag.clone()) { - Vacant(v) => { v.insert(1); } - Occupied(mut v) => { *v.get_mut() += 1; } - } - } - - fn match_cmd_or_posarg(&mut self, spec: &Atom, argv: &ArgvToken) - -> Option { - match (spec, &argv.atom) { - (_, &Command(_)) => { - // This is impossible because the argv parser doesn't know - // how to produce `Command` values. - unreachable!() - } - (&Command(ref n1), &Positional(ref n2)) if n1 == n2 => { - // Coerce a positional to a command because the pattern - // demands it and the positional argument matches it. 
- self.argvi += 1; - Some(ArgvToken { atom: spec.clone(), arg: None }) - } - (&Positional(_), _) => { - self.argvi += 1; - Some(argv.clone()) - } - _ => None, - } - } -} - -impl<'a, 'b> Matcher<'a, 'b> { - fn matches(argv: &'a Argv, pat: &Pattern) - -> Option> { - let m = Matcher { argv: argv }; - let init = MState { - argvi: 0, - counts: argv.counts.clone(), - max_counts: HashMap::new(), - vals: HashMap::new(), - }; - m.states(pat, &init) - .into_iter() - .filter(|s| m.state_consumed_all_argv(s)) - .filter(|s| m.state_has_valid_flags(s)) - .filter(|s| m.state_valid_num_flags(s)) - .collect::>() - .into_iter() - .next() - .map(|mut s| { - m.add_flag_values(&mut s); - m.add_default_values(&mut s); - - // Build a synonym map so that it's easier to look up values. - let mut synmap: SynonymMap = - s.vals.into_iter() - .map(|(k, v)| (k.to_string(), v)) - .collect(); - for (from, to) in argv.dopt.descs.synonyms() { - let (from, to) = (from.to_string(), to.to_string()); - if synmap.contains_key(&to) { - synmap.insert_synonym(from, to); - } - } - synmap - }) - } - - fn token_from(&self, state: &MState) -> Option<&ArgvToken> { - self.argv.positional.get(state.argvi) - } - - fn add_value(&self, state: &mut MState, - atom_spec: &Atom, atom: &Atom, arg: &Option) - -> bool { - let opts = self.argv.dopt.descs.get(atom_spec); - state.add_value(opts, atom_spec, atom, arg) - } - - fn add_flag_values(&self, state: &mut MState) { - for tok in &self.argv.flags { - self.add_value(state, &tok.atom, &tok.atom, &tok.arg); - } - } - - fn add_default_values(&self, state: &mut MState) { - lazy_static! 
{ - static ref SPLIT_SPACE: Regex = regex!(r"\s+"); - } - let vs = &mut state.vals; - for (a, opts) in self.argv.dopt.descs.iter() { - if vs.contains_key(a) { - continue - } - let atom = a.clone(); - match (opts.repeats, &opts.arg) { - (false, &Zero) => { - match *a { - Positional(_) => vs.insert(atom, Plain(None)), - _ => vs.insert(atom, Switch(false)), - }; - } - (true, &Zero) => { - match *a { - Positional(_) => vs.insert(atom, List(vec!())), - _ => vs.insert(atom, Counted(0)), - }; - } - (false, &One(None)) => { vs.insert(atom, Plain(None)); } - (true, &One(None)) => { vs.insert(atom, List(vec!())); } - (false, &One(Some(ref v))) => { - vs.insert(atom, Plain(Some(v.clone()))); - } - (true, &One(Some(ref v))) => { - let words = SPLIT_SPACE - .split(v) - .map(|s| s.to_owned()) - .collect(); - vs.insert(atom, List(words)); - } - } - } - } - - fn state_consumed_all_argv(&self, state: &MState) -> bool { - self.argv.positional.len() == state.argvi - } - - fn state_has_valid_flags(&self, state: &MState) -> bool { - self.argv.counts.keys().all(|flag| state.max_counts.contains_key(flag)) - } - - fn state_valid_num_flags(&self, state: &MState) -> bool { - state.counts.iter().all( - |(flag, count)| count <= &state.max_counts[flag]) - } - - fn states(&self, pat: &Pattern, init: &MState) -> Vec { - match *pat { - Alternates(ref ps) => { - let mut alt_states = vec!(); - for p in ps.iter() { - alt_states.extend(self.states(p, init).into_iter()); - } - alt_states - } - Sequence(ref ps) => { - let (mut states, mut next) = (vec!(), vec!()); - let mut iter = ps.iter(); - match iter.next() { - None => return vec!(init.clone()), - Some(p) => states.extend(self.states(p, init).into_iter()), - } - for p in iter { - for s in states.into_iter() { - next.extend(self.states(p, &s).into_iter()); - } - states = vec!(); - states.extend(next.into_iter()); - next = vec!(); - } - states - } - Optional(ref ps) => { - let mut base = init.clone(); - let mut noflags = vec!(); - for p in ps.iter() 
{ - match p { - // Prevent exponential growth in cases like [--flag...] - // See https://github.com/docopt/docopt.rs/issues/195 - &Repeat(ref b) => match &**b { - &PatAtom(ref a @ Short(_)) - | &PatAtom(ref a @ Long(_)) => { - let argv_count = self.argv.counts.get(a) - .map_or(0, |&x| x); - let max_count = base.max_counts.get(a) - .map_or(0, |&x| x); - if argv_count > max_count { - for _ in max_count..argv_count { - base.use_optional_flag(a); - } - } - } - _ => { - noflags.push(p); - } - }, - &PatAtom(ref a @ Short(_)) - | &PatAtom(ref a @ Long(_)) => { - let argv_count = self.argv.counts.get(a) - .map_or(0, |&x| x); - let max_count = base.max_counts.get(a) - .map_or(0, |&x| x); - if argv_count > max_count { - base.use_optional_flag(a); - } - } - other => { - noflags.push(other); - } - } - } - let mut states = vec!(); - self.all_option_states(&base, &mut states, &*noflags); - states - } - Repeat(ref p) => { match &**p { - &PatAtom(ref a @ Short(_)) - | &PatAtom(ref a @ Long(_)) => { - let mut bases = self.states(&**p, init); - for base in &mut bases { - let argv_count = self.argv.counts.get(a) - .map_or(0, |&x| x); - let max_count = base.max_counts.get(a) - .map_or(0, |&x| x); - if argv_count > max_count { - for _ in max_count..argv_count { - base.use_optional_flag(a); - } - } - } - bases - } - _ => { - let mut grouped_states = vec!(self.states(&**p, init)); - loop { - let mut nextss = vec!(); - for s in grouped_states.last().unwrap().iter() { - nextss.extend( - self.states(&**p, s) - .into_iter() - .filter(|snext| snext != s)); - } - if nextss.is_empty() { - break - } - grouped_states.push(nextss); - } - grouped_states - .into_iter() - .flat_map(|ss| ss.into_iter()) - .collect::>() - } - }} - PatAtom(ref atom) => { - let mut state = init.clone(); - match *atom { - Short(_) | Long(_) => { - if !state.use_flag(atom) { - return vec!() - } - } - Command(_) | Positional(_) => { - let tok = - match self.token_from(init) { - None => return vec!(), - Some(tok) => tok, - 
}; - let tok = - match state.match_cmd_or_posarg(atom, tok) { - None => return vec!(), - Some(tok) => tok, - }; - if !self.add_value(&mut state, atom, - &tok.atom, &tok.arg) { - return vec!() - } - } - } - vec!(state) - } - } - } - - fn all_option_states(&self, base: &MState, states: &mut Vec, - pats: &[&Pattern]) { - if pats.is_empty() { - states.push(base.clone()); - } else { - let (pat, rest) = (*pats.first().unwrap(), &pats[1..]); - for s in self.states(pat, base).into_iter() { - self.all_option_states(&s, states, rest); - } - // Order is important here! This must come after the loop above - // because we prefer presence over absence. The first state wins. - self.all_option_states(base, states, &pats[1..]); - } - } -} - -// Tries to parse a long flag of the form '--flag[=arg]' and returns a tuple -// with the flag atom and whether there is an argument or not. -// If '=arg' exists and 'arg' isn't a valid argument, an error is returned. -fn parse_long_equal(flag: &str) -> Result<(Atom, Argument), String> { - lazy_static! { - static ref LONG_EQUAL: Regex = regex!("^(?P[^=]+)=(?P.+)$"); - } - match LONG_EQUAL.captures(flag) { - None => Ok((Atom::new(flag), Zero)), - Some(cap) => { - let arg = cap.name("arg").unwrap_or(""); - if !Atom::is_arg(arg) { - err!("Argument '{}' for flag '{}' is not in the \ - form ARG or .", flag, arg) - } - Ok((Atom::new(cap.name("name").unwrap_or("")), One(None))) - } - } -} - -fn parse_long_equal_argv(flag: &str) -> (Atom, Option) { - lazy_static! { - static ref LONG_EQUAL: Regex = regex!("^(?P[^=]+)=(?P.*)$"); - } - match LONG_EQUAL.captures(flag) { - None => (Atom::new(flag), None), - Some(cap) => ( - Atom::new(cap.name("name").unwrap_or("")), - Some(cap.name("arg").unwrap_or("").into()), - ), - } -} - -// Tokenizes a usage pattern. -// Beware: regex hack ahead. Tokenizes based on whitespace separated words. -// It first normalizes `[xyz]` -> `[ xyz ]` so that delimiters are tokens. -// Similarly for `...`, `(`, `)` and `|`. 
-// One hitch: `--flag=` is allowed, so we use a regex to pick out -// words. -fn pattern_tokens(pat: &str) -> Vec { - lazy_static! { - static ref NORMALIZE: Regex = regex!(r"\.\.\.|\[|\]|\(|\)|\|"); - static ref WORDS: Regex = regex!(r"--\S+?=<[^>]+>|<[^>]+>|\S+"); - } - - let pat = NORMALIZE.replace_all(pat.trim(), " $0 "); - let mut words = vec!(); - for cap in WORDS.captures_iter(&*pat) { - words.push(cap.at(0).unwrap_or("").into()); - } - words -} diff -Nru cargo-0.17.0/vendor/docopt-0.6.86/src/synonym.rs cargo-0.19.0/vendor/docopt-0.6.86/src/synonym.rs --- cargo-0.17.0/vendor/docopt-0.6.86/src/synonym.rs 2017-03-24 16:59:54.000000000 +0000 +++ cargo-0.19.0/vendor/docopt-0.6.86/src/synonym.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,107 +0,0 @@ -use std::collections::HashMap; -use std::collections::hash_map::{Iter, Keys}; -use std::fmt::Debug; -use std::hash::Hash; -use std::iter::{FromIterator, IntoIterator}; -use std::mem; - -#[derive(Clone)] -pub struct SynonymMap { - vals: HashMap, - syns: HashMap, -} - -impl SynonymMap { - pub fn new() -> SynonymMap { - SynonymMap { - vals: HashMap::new(), - syns: HashMap::new(), - } - } - - pub fn insert_synonym(&mut self, from: K, to: K) -> bool { - assert!(self.vals.contains_key(&to)); - self.syns.insert(from, to).is_none() - } - - pub fn keys(&self) -> Keys { - self.vals.keys() - } - - pub fn iter(&self) -> Iter { - self.vals.iter() - } - - pub fn synonyms(&self) -> Iter { - self.syns.iter() - } - - pub fn find(&self, k: &K) -> Option<&V> { - self.with_key(k, |k| self.vals.get(k)) - } - - pub fn contains_key(&self, k: &K) -> bool { - self.with_key(k, |k| self.vals.contains_key(k)) - } - - pub fn len(&self) -> usize { - self.vals.len() - } - - fn with_key(&self, k: &K, with: F) -> T where F: FnOnce(&K) -> T { - if self.syns.contains_key(k) { - with(&self.syns[k]) - } else { - with(k) - } - } -} - -impl SynonymMap { - pub fn resolve(&self, k: &K) -> K { - self.with_key(k, |k| k.clone()) - } - - pub fn get<'a>(&'a self, 
k: &K) -> &'a V { - self.find(k).unwrap() - } - - pub fn find_mut<'a>(&'a mut self, k: &K) -> Option<&'a mut V> { - if self.syns.contains_key(k) { - self.vals.get_mut(&self.syns[k]) - } else { - self.vals.get_mut(k) - } - } - - pub fn swap(&mut self, k: K, mut new: V) -> Option { - if self.syns.contains_key(&k) { - let old = self.vals.get_mut(&k).unwrap(); - mem::swap(old, &mut new); - Some(new) - } else { - self.vals.insert(k, new) - } - } - - pub fn insert(&mut self, k: K, v: V) -> bool { - self.swap(k, v).is_none() - } -} - -impl FromIterator<(K, V)> for SynonymMap { - fn from_iter>(iter: T) -> SynonymMap { - let mut map = SynonymMap::new(); - for (k, v) in iter { - map.insert(k, v); - } - map - } -} - -impl Debug for SynonymMap { - fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result { - try!(self.vals.fmt(f)); - write!(f, " (synomyns: {:?})", self.syns) - } -} diff -Nru cargo-0.17.0/vendor/docopt-0.6.86/src/test/mod.rs cargo-0.19.0/vendor/docopt-0.6.86/src/test/mod.rs --- cargo-0.17.0/vendor/docopt-0.6.86/src/test/mod.rs 2017-03-24 16:59:54.000000000 +0000 +++ cargo-0.19.0/vendor/docopt-0.6.86/src/test/mod.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,111 +0,0 @@ -use std::collections::HashMap; -use {Docopt, ArgvMap}; -use Value::{self, Switch, Plain}; - -fn get_args(doc: &str, argv: &[&'static str]) -> ArgvMap { - let dopt = match Docopt::new(doc) { - Err(err) => panic!("Invalid usage: {}", err), - Ok(dopt) => dopt, - }; - match dopt.argv(vec!["cmd"].iter().chain(argv.iter())).parse() { - Err(err) => panic!("{}", err), - Ok(vals) => vals, - } -} - -fn map_from_alist(alist: Vec<(&'static str, Value)>) - -> HashMap { - alist.into_iter().map(|(k, v)| (k.to_string(), v)).collect() -} - -fn same_args(expected: &HashMap, got: &ArgvMap) { - for (k, ve) in expected.iter() { - match got.map.find(k) { - None => panic!("EXPECTED has '{}' but GOT does not.", k), - Some(vg) => { - assert!(ve == vg, - "{}: EXPECTED = '{:?}' != '{:?}' = GOT", k, ve, vg) - } - } 
- } - for (k, vg) in got.map.iter() { - match got.map.find(k) { - None => panic!("GOT has '{}' but EXPECTED does not.", k), - Some(ve) => { - assert!(vg == ve, - "{}: GOT = '{:?}' != '{:?}' = EXPECTED", k, vg, ve) - } - } - } -} - -macro_rules! test_expect( - ($name:ident, $doc:expr, $args:expr, $expected:expr) => ( - #[test] - fn $name() { - let vals = get_args($doc, $args); - let expected = map_from_alist($expected); - same_args(&expected, &vals); - } - ); -); - -macro_rules! test_user_error( - ($name:ident, $doc:expr, $args:expr) => ( - #[test] - #[should_panic] - fn $name() { get_args($doc, $args); } - ); -); - -test_expect!(test_issue_13, "Usage: prog file ", &["file", "file"], - vec![("file", Switch(true)), - ("", Plain(Some("file".to_string())))]); - -test_expect!(test_issue_129, "Usage: prog [options] - -Options: - --foo ARG Foo foo.", - &["--foo=a b"], - vec![("--foo", Plain(Some("a b".into())))]); - -#[test] -fn regression_issue_12() { - const USAGE: &'static str = " - Usage: - whisper info - whisper update - whisper mark - "; - - #[derive(RustcDecodable, Debug)] - struct Args { - arg_file: String, - cmd_info: bool, - cmd_update: bool, - arg_timestamp: u64, - arg_value: f64 - } - - let dopt: Args = Docopt::new(USAGE).unwrap() - .argv(&["whisper", "mark", "./p/blah", "100"]) - .decode().unwrap(); - assert_eq!(dopt.arg_timestamp, 0); -} - -#[test] -fn regression_issue_195() { - const USAGE: &'static str = " - Usage: - slow [-abcdefghijklmnopqrs...] 
- "; - - let argv = &["slow", "-abcdefghijklmnopqrs"]; - let dopt : Docopt = Docopt::new(USAGE).unwrap().argv(argv); - - dopt.parse().unwrap(); -} - - -mod testcases; -mod suggestions; diff -Nru cargo-0.17.0/vendor/docopt-0.6.86/src/test/suggestions.rs cargo-0.19.0/vendor/docopt-0.6.86/src/test/suggestions.rs --- cargo-0.17.0/vendor/docopt-0.6.86/src/test/suggestions.rs 2017-03-24 16:59:54.000000000 +0000 +++ cargo-0.19.0/vendor/docopt-0.6.86/src/test/suggestions.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,72 +0,0 @@ -use {Docopt, Error}; - -fn get_suggestion(doc: &str, argv: &[&'static str]) -> Error { - let dopt = - match Docopt::new(doc) { - Err(err) => panic!("Invalid usage: {}", err), - Ok(dopt) => dopt, - }; - let mut argv: Vec<_> = argv.iter().map(|x| x.to_string()).collect(); - argv.insert(0, "prog".to_string()); - match dopt.argv(argv.into_iter()).parse() { - Err(err) => err, - Ok(_) => panic!("Should have been a user error"), - } -} - -macro_rules! test_suggest( - ($name:ident, $doc:expr, $args:expr, $expected:expr) => ( - #[test] - fn $name() { - let sg = get_suggestion($doc, $args); - println!("{}", sg); - match sg { - Error::WithProgramUsage(e, _) => { - match *e { - Error::Argv(msg) => { - println!("{:?}",msg); - assert_eq!(msg, $expected); - } - err => panic!("Error other than argv: {:?}", err) - } - }, - _ => panic!("Error without program usage") - } - } - ); -); - - -test_suggest!(test_suggest_1, "Usage: prog [--release]", &["--releas"], "Unknown flag: '--releas'. Did you mean '--release'?"); - -test_suggest!(test_suggest_2, -"Usage: prog [-a] - prog [-a] ... - prog [-e] - Options: - -a, --archive Copy everything. -", -&["-d"], "Unknown flag: '-d'"); - - -test_suggest!(test_suggest_3, -"Usage: prog [-a] - prog [-a] ... - prog [-e] - Options: - -a, --archive Copy everything. - -e, --export Export all the things. -", -&["--expotr"], "Unknown flag: '--expotr'. 
Did you mean '--export'?"); - - -test_suggest!(test_suggest_4, -"Usage: prog [--import] [--complete] -", -&["--mport", "--complte"], "Unknown flag: '--mport'. Did you mean '--import'?"); - -test_suggest!(test_suggest_5, -"Usage: prog [--import] [--complete] -", -&["--import", "--complte"], "Unknown flag: '--complte'. Did you mean '--complete'?"); - diff -Nru cargo-0.17.0/vendor/docopt-0.6.86/src/test/testcases.docopt cargo-0.19.0/vendor/docopt-0.6.86/src/test/testcases.docopt --- cargo-0.17.0/vendor/docopt-0.6.86/src/test/testcases.docopt 2017-03-24 16:59:54.000000000 +0000 +++ cargo-0.19.0/vendor/docopt-0.6.86/src/test/testcases.docopt 1970-01-01 00:00:00.000000000 +0000 @@ -1,1122 +0,0 @@ -r"""Usage: prog - -""" -$ prog -{} - -$ prog --xxx -"user-error" - - -r"""Usage: prog [options] - -Options: -a All. - -""" -$ prog -{"-a": false} - -$ prog -a -{"-a": true} - -$ prog -x -"user-error" - - -r"""Usage: prog [options] - -Options: --all All. - -""" -$ prog -{"--all": false} - -$ prog --all -{"--all": true} - -$ prog --xxx -"user-error" - - -r"""Usage: prog [options] - -Options: -v, --verbose Verbose. - -""" -$ prog --verbose -{"--verbose": true} - -$ prog --ver -"user-error" - -$ prog -v -{"--verbose": true} - - -r"""Usage: prog [options] - -Options: -p PATH - -""" -$ prog -p home/ -{"-p": "home/"} - -$ prog -phome/ -{"-p": "home/"} - -$ prog -p -"user-error" - - -r"""Usage: prog [options] - -Options: --path - -""" -$ prog --path home/ -{"--path": "home/"} - -$ prog --path=home/ -{"--path": "home/"} - -$ prog --pa home/ -"user-error" - -$ prog --pa=home/ -"user-error" - -$ prog --path -"user-error" - - -r"""Usage: prog [options] - -Options: -p PATH, --path= Path to files. - -""" -$ prog -proot -{"--path": "root"} - - -r"""Usage: prog [options] - -Options: -p --path PATH Path to files. 
- -""" -$ prog -p root -{"--path": "root"} - -$ prog --path root -{"--path": "root"} - - -r"""Usage: prog [options] - -Options: - -p PATH Path to files [default: ./] - -""" -$ prog -{"-p": "./"} - -$ prog -phome -{"-p": "home"} - - -r"""UsAgE: prog [options] - -OpTiOnS: --path= Path to files - [dEfAuLt: /root] - -""" -$ prog -{"--path": "/root"} - -$ prog --path=home -{"--path": "home"} - - -r"""usage: prog [options] - -options: - -a Add - -r Remote - -m Message - -""" -$ prog -a -r -m Hello -{"-a": true, - "-r": true, - "-m": "Hello"} - -$ prog -armyourass -{"-a": true, - "-r": true, - "-m": "yourass"} - -$ prog -a -r -{"-a": true, - "-r": true, - "-m": null} - - -r"""Usage: prog [options] - -Options: --version - --verbose - -""" -$ prog --version -{"--version": true, - "--verbose": false} - -$ prog --verbose -{"--version": false, - "--verbose": true} - -$ prog --ver -"user-error" - -$ prog --verb -"user-error" - - -r"""usage: prog [-a -r -m ] - -options: - -a Add - -r Remote - -m Message - -""" -$ prog -armyourass -{"-a": true, - "-r": true, - "-m": "yourass"} - - -r"""usage: prog [-armMSG] - -options: -a Add - -r Remote - -m Message - -""" -$ prog -a -r -m Hello -{"-a": true, - "-r": true, - "-m": "Hello"} - - -r"""usage: prog -a -b - -options: - -a - -b - -""" -$ prog -a -b -{"-a": true, "-b": true} - -$ prog -b -a -{"-a": true, "-b": true} - -$ prog -a -"user-error" - -$ prog -"user-error" - - -r"""usage: prog (-a -b) - -options: -a - -b - -""" -$ prog -a -b -{"-a": true, "-b": true} - -$ prog -b -a -{"-a": true, "-b": true} - -$ prog -a -"user-error" - -$ prog -"user-error" - - -r"""usage: prog [-a] -b - -options: -a - -b - -""" -$ prog -a -b -{"-a": true, "-b": true} - -$ prog -b -a -{"-a": true, "-b": true} - -$ prog -a -"user-error" - -$ prog -b -{"-a": false, "-b": true} - -$ prog -"user-error" - - -r"""usage: prog [(-a -b)] - -options: -a - -b - -""" -$ prog -a -b -{"-a": true, "-b": true} - -$ prog -b -a -{"-a": true, "-b": true} - -$ prog -a 
-"user-error" - -$ prog -b -"user-error" - -$ prog -{"-a": false, "-b": false} - - -r"""usage: prog (-a|-b) - -options: -a - -b - -""" -$ prog -a -b -"user-error" - -$ prog -"user-error" - -$ prog -a -{"-a": true, "-b": false} - -$ prog -b -{"-a": false, "-b": true} - - -r"""usage: prog [ -a | -b ] - -options: -a - -b - -""" -$ prog -a -b -"user-error" - -$ prog -{"-a": false, "-b": false} - -$ prog -a -{"-a": true, "-b": false} - -$ prog -b -{"-a": false, "-b": true} - - -r"""usage: prog """ -$ prog 10 -{"": "10"} - -$ prog 10 20 -"user-error" - -$ prog -"user-error" - - -r"""usage: prog []""" -$ prog 10 -{"": "10"} - -$ prog 10 20 -"user-error" - -$ prog -{"": null} - - -r"""usage: prog """ -$ prog 10 20 40 -{"": "10", "": "20", "": "40"} - -$ prog 10 20 -"user-error" - -$ prog -"user-error" - - -r"""usage: prog [ ]""" -$ prog 10 20 40 -{"": "10", "": "20", "": "40"} - -$ prog 10 20 -{"": "10", "": "20", "": null} - -$ prog -"user-error" - - -r"""usage: prog [ | ]""" -$ prog 10 20 40 -"user-error" - -$ prog 20 40 -{"": null, "": "20", "": "40"} - -$ prog -{"": null, "": null, "": null} - - -r"""usage: prog ( --all | ) - -options: - --all - -""" -$ prog 10 --all -{"": "10", "--all": true, "": null} - -$ prog 10 -{"": null, "--all": false, "": "10"} - -$ prog -"user-error" - - -r"""usage: prog [ ]""" -$ prog 10 20 -{"": ["10", "20"]} - -$ prog 10 -{"": ["10"]} - -$ prog -{"": []} - - -r"""usage: prog [( )]""" -$ prog 10 20 -{"": ["10", "20"]} - -$ prog 10 -"user-error" - -$ prog -{"": []} - - -r"""usage: prog NAME...""" -$ prog 10 20 -{"NAME": ["10", "20"]} - -$ prog 10 -{"NAME": ["10"]} - -$ prog -"user-error" - - -r"""usage: prog [NAME]...""" -$ prog 10 20 -{"NAME": ["10", "20"]} - -$ prog 10 -{"NAME": ["10"]} - -$ prog -{"NAME": []} - - -r"""usage: prog [NAME...]""" -$ prog 10 20 -{"NAME": ["10", "20"]} - -$ prog 10 -{"NAME": ["10"]} - -$ prog -{"NAME": []} - - -r"""usage: prog [NAME [NAME ...]]""" -$ prog 10 20 -{"NAME": ["10", "20"]} - -$ prog 10 -{"NAME": 
["10"]} - -$ prog -{"NAME": []} - - -r"""usage: prog (NAME | --foo NAME) - -options: --foo - -""" -$ prog 10 -{"NAME": "10", "--foo": false} - -$ prog --foo 10 -{"NAME": "10", "--foo": true} - -$ prog --foo=10 -"user-error" - - -r"""usage: prog (NAME | --foo) [--bar | NAME] - -options: --foo -options: --bar - -""" -$ prog 10 -{"NAME": ["10"], "--foo": false, "--bar": false} - -$ prog 10 20 -{"NAME": ["10", "20"], "--foo": false, "--bar": false} - -$ prog --foo --bar -{"NAME": [], "--foo": true, "--bar": true} - - -r"""Naval Fate. - -Usage: - prog ship new ... - prog ship [] move [--speed=] - prog ship shoot - prog mine (set|remove) [--moored|--drifting] - prog -h | --help - prog --version - -Options: - -h --help Show this screen. - --version Show version. - --speed= Speed in knots [default: 10]. - --moored Mored (anchored) mine. - --drifting Drifting mine. - -""" -$ prog ship Guardian move 150 300 --speed=20 -{"--drifting": false, - "--help": false, - "--moored": false, - "--speed": "20", - "--version": false, - "": ["Guardian"], - "": "150", - "": "300", - "mine": false, - "move": true, - "new": false, - "remove": false, - "set": false, - "ship": true, - "shoot": false} - - -r"""usage: prog --hello""" -$ prog --hello -{"--hello": true} - - -r"""usage: prog [--hello=]""" -$ prog -{"--hello": null} - -$ prog --hello wrld -{"--hello": "wrld"} - - -r"""usage: prog [-o]""" -$ prog -{"-o": false} - -$ prog -o -{"-o": true} - - -r"""usage: prog [-opr]""" -$ prog -op -{"-o": true, "-p": true, "-r": false} - - -r"""usage: prog --aabb | --aa""" -$ prog --aa -{"--aabb": false, "--aa": true} - -$ prog --a -"user-error" # not a unique prefix - -# -# Counting number of flags -# - -r"""Usage: prog -v""" -$ prog -v -{"-v": true} - - -r"""Usage: prog [-v -v]""" -$ prog -{"-v": 0} - -$ prog -v -{"-v": 1} - -$ prog -vv -{"-v": 2} - - -r"""Usage: prog -v ...""" -$ prog -"user-error" - -$ prog -v -{"-v": 1} - -$ prog -vv -{"-v": 2} - -$ prog -vvvvvv -{"-v": 6} - - -r"""Usage: prog [-v 
| -vv | -vvv] - -This one is probably most readable user-friednly variant. - -""" -$ prog -{"-v": 0} - -$ prog -v -{"-v": 1} - -$ prog -vv -{"-v": 2} - -$ prog -vvvv -"user-error" - - -r"""usage: prog [--ver --ver]""" -$ prog --ver --ver -{"--ver": 2} - - -# -# Counting commands -# - -r"""usage: prog [go]""" -$ prog go -{"go": true} - - -r"""usage: prog [go go]""" -$ prog -{"go": 0} - -$ prog go -{"go": 1} - -$ prog go go -{"go": 2} - -$ prog go go go -"user-error" - -r"""usage: prog go...""" -$ prog go go go go go -{"go": 5} - -# -# [options] does not include options from usage-pattern -# -r"""usage: prog [options] [-a] - -options: -a - -b -""" -$ prog -a -{"-a": true, "-b": false} - -$ prog -aa -"user-error" - -# -# Test [options] shourtcut -# - -r"""Usage: prog [options] A - -Options: - -q Be quiet - -v Be verbose. - -""" -$ prog arg -{"A": "arg", "-v": false, "-q": false} - -$ prog -v arg -{"A": "arg", "-v": true, "-q": false} - -$ prog -q arg -{"A": "arg", "-v": false, "-q": true} - -# -# Test single dash -# - -r"""usage: prog [-]""" - -$ prog - -{"-": true} - -$ prog -{"-": false} - -# -# If argument is repeated, its value should always be a list -# - -r"""usage: prog [NAME [NAME ...]]""" - -$ prog a b -{"NAME": ["a", "b"]} - -$ prog -{"NAME": []} - -# -# Option's argument defaults to null/None -# - -r"""usage: prog [options] - -options: - -a Add - -m Message - -""" -$ prog -a -{"-m": null, "-a": true} - -# -# Test options without description -# - -r"""usage: prog --hello""" -$ prog --hello -{"--hello": true} - -r"""usage: prog [--hello=]""" -$ prog -{"--hello": null} - -$ prog --hello wrld -{"--hello": "wrld"} - -r"""usage: prog [-o]""" -$ prog -{"-o": false} - -$ prog -o -{"-o": true} - -r"""usage: prog [-opr]""" -$ prog -op -{"-o": true, "-p": true, "-r": false} - -r"""usage: git [-v | --verbose]""" -$ prog -v -{"-v": true, "--verbose": false} - -r"""usage: git remote [-v | --verbose]""" -$ prog remote -v -{"remote": true, "-v": true, "--verbose": false} - 
-# -# Test empty usage pattern -# - -r"""usage: prog""" -$ prog -{} - -r"""usage: prog - prog -""" -$ prog 1 2 -{"": "1", "": "2"} - -$ prog -{"": null, "": null} - -r"""usage: prog - prog -""" -$ prog -{"": null, "": null} - -# -# Option's argument should not capture default value from usage pattern -# - -r"""usage: prog [--file=]""" -$ prog -{"--file": null} - -r"""usage: prog [--file=] - -options: --file - -""" -$ prog -{"--file": null} - -r"""Usage: prog [-a ] - -Options: -a, --address TCP address [default: localhost:6283]. - -""" -$ prog -{"--address": "localhost:6283"} - -# -# If option with argument could be repeated, -# its arguments should be accumulated into a list -# - -r"""usage: prog --long= ...""" - -$ prog --long one -{"--long": ["one"]} - -$ prog --long one --long two -{"--long": ["one", "two"]} - -# -# Test multiple elements repeated at once -# - -r"""usage: prog (go --speed=)...""" -$ prog go left --speed=5 go right --speed=9 -{"go": 2, "": ["left", "right"], "--speed": ["5", "9"]} - -# -# Required options should work with option shortcut -# - -r"""usage: prog [options] -a - -options: -a - -""" -$ prog -a -{"-a": true} - -# -# If option could be repeated its defaults should be split into a list -# - -r"""usage: prog [-o ]... - -options: -o [default: x] - -""" -$ prog -o this -o that -{"-o": ["this", "that"]} - -$ prog -{"-o": ["x"]} - -r"""usage: prog [-o ]... 
- -options: -o [default: x y] - -""" -$ prog -o this -{"-o": ["this"]} - -$ prog -{"-o": ["x", "y"]} - -# -# Test stacked option's argument -# - -r"""usage: prog -pPATH - -options: -p PATH - -""" -$ prog -pHOME -{"-p": "HOME"} - -# -# Issue 56: Repeated mutually exclusive args give nested lists sometimes -# - -r"""Usage: foo (--xx=X|--yy=Y)...""" -$ prog --xx=1 --yy=2 -{"--xx": ["1"], "--yy": ["2"]} - -# -# POSIXly correct tokenization -# - -r"""usage: prog []""" -$ prog f.txt -{"": "f.txt"} - -r"""usage: prog [--input=]...""" -$ prog --input a.txt --input=b.txt -{"--input": ["a.txt", "b.txt"]} - -# -# Issue 85: `[options]` shourtcut with multiple subcommands -# - -r"""usage: prog good [options] - prog fail [options] - -options: --loglevel=N - -""" -$ prog fail --loglevel 5 -{"--loglevel": "5", "fail": true, "good": false} - -# -# Usage-section syntax -# - -r"""usage:prog --foo""" -$ prog --foo -{"--foo": true} - -r"""PROGRAM USAGE: prog --foo""" -$ prog --foo -{"--foo": true} - -r"""Usage: prog --foo - prog --bar -NOT PART OF SECTION""" -$ prog --foo -{"--foo": true, "--bar": false} - -r"""Usage: - prog --foo - prog --bar - -NOT PART OF SECTION""" -$ prog --foo -{"--foo": true, "--bar": false} - -r"""Usage: - prog --foo - prog --bar -NOT PART OF SECTION""" -$ prog --foo -{"--foo": true, "--bar": false} - -# -# Options-section syntax -# - -r"""Usage: prog [options] - -global options: --foo -local options: --baz - --bar -other options: - --egg - --spam --not-an-option- - -""" -$ prog --bar --egg -{"--bar": true, "--egg": true, "--spam": false} - -r"""Usage: prog [-a] [--] [...]""" -$ program -a -{"-a": true, "": []} - -r"""Usage: prog [-a] [--] [...]""" -$ program -- -{"-a": false, "": []} - -r"""Usage: prog [-a] [--] [...]""" -$ program -a -- -b -{"-a": true, "": ["-b"]} - -r"""Usage: prog [-a] [--] [...]""" -$ program -a -- -a -{"-a": true, "": ["-a"]} - -r"""Usage: prog [-a] [--] [...]""" -$ program -- -a -{"-a": false, "": ["-a"]} - -r"""Usage: prog test 
[options] [--] [...]""" -$ program test a -- -b -{"": ["a", "-b"]} - -r"""Usage: prog test [options] [--] [...]""" -$ program test -- -b -{"": ["-b"]} - -r"""Usage: prog test [options] [--] [...]""" -$ program test a -b -"user-error" - -r"""Usage: prog test [options] [--] [...]""" -$ program test -- -b -- -{"": ["-b", "--"]} - -r"""Usage: prog [options] - -Options: - -a ... Foo -""" -$ program -{"-a": 0} -$ program -a -{"-a": 1} -$ program -a -a -{"-a": 2} -$ program -aa -{"-a": 2} -$ program -a -a -a -{"-a": 3} -$ program -aaa -{"-a": 3} - -r"""Usage: prog [options] - -Options: - -a, --all ... Foo -""" -$ program -{"-a": 0} -$ program -a -{"-a": 1} -$ program -a --all -{"-a": 2} -$ program -aa --all -{"-a": 3} -$ program --all -{"-a": 1} -$ program --all --all -{"-a": 2} - -r"""Usage: prog [options] - -Options: - -a, --all ARG ... Foo -""" -$ program -{"-a": []} -$ program -a 1 -{"-a": ["1"]} -$ program -a 2 --all 3 -{"-a": ["2", "3"]} -$ program -a4 -a5 --all 6 -{"-a": ["4", "5", "6"]} -$ program --all 7 -{"-a": ["7"]} -$ program --all 8 --all 9 -{"-a": ["8", "9"]} - -r"""Usage: prog [options] - -Options: - --all ... Foo -""" -$ program -{"--all": 0} -$ program --all -{"--all": 1} -$ program --all --all -{"--all": 2} - -r"""Usage: prog [options] - -Options: - --all=ARG ... Foo -""" -$ program -{"--all": []} -$ program --all 1 -{"--all": ["1"]} -$ program --all 2 --all 3 -{"--all": ["2", "3"]} - -r"""Usage: prog [options] - -Options: - --all ... Foo -""" -$ program --all --all -"user-error" - -r"""Usage: prog [options] - -Options: - --all ARG ... 
Foo -""" -$ program --all foo --all bar -"user-error" - -r"""Usage: prog --speed=ARG""" -$ program --speed 20 -{"--speed": "20"} -$ program --speed=20 -{"--speed": "20"} -$ program --speed=-20 -{"--speed": "-20"} -$ program --speed -20 -{"--speed": "-20"} - -# -# Issue 187: Fails to parse a default value containing ']' -# - -r"""usage: prog [--datetime=] - -options: --datetime= Regex for datetimes [default: ^[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}] - -""" -$ prog -{"--datetime": "^[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}"} - -# -# Issue 137: -x-y being seen as a positional argument -# - -r"""Usage: prog [options] - -Options: - -x ARG - -y""" -$ prog -x-y -{"-x": "-y"} diff -Nru cargo-0.17.0/vendor/docopt-0.6.86/src/test/testcases.rs cargo-0.19.0/vendor/docopt-0.6.86/src/test/testcases.rs --- cargo-0.17.0/vendor/docopt-0.6.86/src/test/testcases.rs 2017-03-24 16:59:54.000000000 +0000 +++ cargo-0.19.0/vendor/docopt-0.6.86/src/test/testcases.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,801 +0,0 @@ -// !!! ATTENTION !!! -// This file is automatically generated by `scripts/mk-testcases`. -// Please do not edit this file directly! 
- -use Value::{Switch, Counted, Plain, List}; -use test::{get_args, map_from_alist, same_args}; - -test_expect!(test_0_testcases, "Usage: prog", &[], vec!()); - -test_user_error!(test_1_testcases, "Usage: prog", &["--xxx"]); - -test_expect!(test_2_testcases, "Usage: prog [options] - -Options: -a All.", &[], vec!(("-a", Switch(false)))); - -test_expect!(test_3_testcases, "Usage: prog [options] - -Options: -a All.", &["-a"], vec!(("-a", Switch(true)))); - -test_user_error!(test_4_testcases, "Usage: prog [options] - -Options: -a All.", &["-x"]); - -test_expect!(test_5_testcases, "Usage: prog [options] - -Options: --all All.", &[], vec!(("--all", Switch(false)))); - -test_expect!(test_6_testcases, "Usage: prog [options] - -Options: --all All.", &["--all"], vec!(("--all", Switch(true)))); - -test_user_error!(test_7_testcases, "Usage: prog [options] - -Options: --all All.", &["--xxx"]); - -test_expect!(test_8_testcases, "Usage: prog [options] - -Options: -v, --verbose Verbose.", &["--verbose"], vec!(("--verbose", Switch(true)))); - -test_user_error!(test_9_testcases, "Usage: prog [options] - -Options: -v, --verbose Verbose.", &["--ver"]); - -test_expect!(test_10_testcases, "Usage: prog [options] - -Options: -v, --verbose Verbose.", &["-v"], vec!(("--verbose", Switch(true)))); - -test_expect!(test_11_testcases, "Usage: prog [options] - -Options: -p PATH", &["-p", "home/"], vec!(("-p", Plain(Some("home/".to_string()))))); - -test_expect!(test_12_testcases, "Usage: prog [options] - -Options: -p PATH", &["-phome/"], vec!(("-p", Plain(Some("home/".to_string()))))); - -test_user_error!(test_13_testcases, "Usage: prog [options] - -Options: -p PATH", &["-p"]); - -test_expect!(test_14_testcases, "Usage: prog [options] - -Options: --path ", &["--path", "home/"], vec!(("--path", Plain(Some("home/".to_string()))))); - -test_expect!(test_15_testcases, "Usage: prog [options] - -Options: --path ", &["--path=home/"], vec!(("--path", Plain(Some("home/".to_string()))))); - 
-test_user_error!(test_16_testcases, "Usage: prog [options] - -Options: --path ", &["--pa", "home/"]); - -test_user_error!(test_17_testcases, "Usage: prog [options] - -Options: --path ", &["--pa=home/"]); - -test_user_error!(test_18_testcases, "Usage: prog [options] - -Options: --path ", &["--path"]); - -test_expect!(test_19_testcases, "Usage: prog [options] - -Options: -p PATH, --path= Path to files.", &["-proot"], vec!(("--path", Plain(Some("root".to_string()))))); - -test_expect!(test_20_testcases, "Usage: prog [options] - -Options: -p --path PATH Path to files.", &["-p", "root"], vec!(("--path", Plain(Some("root".to_string()))))); - -test_expect!(test_21_testcases, "Usage: prog [options] - -Options: -p --path PATH Path to files.", &["--path", "root"], vec!(("--path", Plain(Some("root".to_string()))))); - -test_expect!(test_22_testcases, "Usage: prog [options] - -Options: - -p PATH Path to files [default: ./]", &[], vec!(("-p", Plain(Some("./".to_string()))))); - -test_expect!(test_23_testcases, "Usage: prog [options] - -Options: - -p PATH Path to files [default: ./]", &["-phome"], vec!(("-p", Plain(Some("home".to_string()))))); - -test_expect!(test_24_testcases, "UsAgE: prog [options] - -OpTiOnS: --path= Path to files - [dEfAuLt: /root]", &[], vec!(("--path", Plain(Some("/root".to_string()))))); - -test_expect!(test_25_testcases, "UsAgE: prog [options] - -OpTiOnS: --path= Path to files - [dEfAuLt: /root]", &["--path=home"], vec!(("--path", Plain(Some("home".to_string()))))); - -test_expect!(test_26_testcases, "usage: prog [options] - -options: - -a Add - -r Remote - -m Message", &["-a", "-r", "-m", "Hello"], vec!(("-m", Plain(Some("Hello".to_string()))), ("-a", Switch(true)), ("-r", Switch(true)))); - -test_expect!(test_27_testcases, "usage: prog [options] - -options: - -a Add - -r Remote - -m Message", &["-armyourass"], vec!(("-m", Plain(Some("yourass".to_string()))), ("-a", Switch(true)), ("-r", Switch(true)))); - -test_expect!(test_28_testcases, "usage: prog 
[options] - -options: - -a Add - -r Remote - -m Message", &["-a", "-r"], vec!(("-m", Plain(None)), ("-a", Switch(true)), ("-r", Switch(true)))); - -test_expect!(test_29_testcases, "Usage: prog [options] - -Options: --version - --verbose", &["--version"], vec!(("--verbose", Switch(false)), ("--version", Switch(true)))); - -test_expect!(test_30_testcases, "Usage: prog [options] - -Options: --version - --verbose", &["--verbose"], vec!(("--verbose", Switch(true)), ("--version", Switch(false)))); - -test_user_error!(test_31_testcases, "Usage: prog [options] - -Options: --version - --verbose", &["--ver"]); - -test_user_error!(test_32_testcases, "Usage: prog [options] - -Options: --version - --verbose", &["--verb"]); - -test_expect!(test_33_testcases, "usage: prog [-a -r -m ] - -options: - -a Add - -r Remote - -m Message", &["-armyourass"], vec!(("-m", Plain(Some("yourass".to_string()))), ("-a", Switch(true)), ("-r", Switch(true)))); - -test_expect!(test_34_testcases, "usage: prog [-armMSG] - -options: -a Add - -r Remote - -m Message", &["-a", "-r", "-m", "Hello"], vec!(("-m", Plain(Some("Hello".to_string()))), ("-a", Switch(true)), ("-r", Switch(true)))); - -test_expect!(test_35_testcases, "usage: prog -a -b - -options: - -a - -b", &["-a", "-b"], vec!(("-a", Switch(true)), ("-b", Switch(true)))); - -test_expect!(test_36_testcases, "usage: prog -a -b - -options: - -a - -b", &["-b", "-a"], vec!(("-a", Switch(true)), ("-b", Switch(true)))); - -test_user_error!(test_37_testcases, "usage: prog -a -b - -options: - -a - -b", &["-a"]); - -test_user_error!(test_38_testcases, "usage: prog -a -b - -options: - -a - -b", &[]); - -test_expect!(test_39_testcases, "usage: prog (-a -b) - -options: -a - -b", &["-a", "-b"], vec!(("-a", Switch(true)), ("-b", Switch(true)))); - -test_expect!(test_40_testcases, "usage: prog (-a -b) - -options: -a - -b", &["-b", "-a"], vec!(("-a", Switch(true)), ("-b", Switch(true)))); - -test_user_error!(test_41_testcases, "usage: prog (-a -b) - -options: -a 
- -b", &["-a"]); - -test_user_error!(test_42_testcases, "usage: prog (-a -b) - -options: -a - -b", &[]); - -test_expect!(test_43_testcases, "usage: prog [-a] -b - -options: -a - -b", &["-a", "-b"], vec!(("-a", Switch(true)), ("-b", Switch(true)))); - -test_expect!(test_44_testcases, "usage: prog [-a] -b - -options: -a - -b", &["-b", "-a"], vec!(("-a", Switch(true)), ("-b", Switch(true)))); - -test_user_error!(test_45_testcases, "usage: prog [-a] -b - -options: -a - -b", &["-a"]); - -test_expect!(test_46_testcases, "usage: prog [-a] -b - -options: -a - -b", &["-b"], vec!(("-a", Switch(false)), ("-b", Switch(true)))); - -test_user_error!(test_47_testcases, "usage: prog [-a] -b - -options: -a - -b", &[]); - -test_expect!(test_48_testcases, "usage: prog [(-a -b)] - -options: -a - -b", &["-a", "-b"], vec!(("-a", Switch(true)), ("-b", Switch(true)))); - -test_expect!(test_49_testcases, "usage: prog [(-a -b)] - -options: -a - -b", &["-b", "-a"], vec!(("-a", Switch(true)), ("-b", Switch(true)))); - -test_user_error!(test_50_testcases, "usage: prog [(-a -b)] - -options: -a - -b", &["-a"]); - -test_user_error!(test_51_testcases, "usage: prog [(-a -b)] - -options: -a - -b", &["-b"]); - -test_expect!(test_52_testcases, "usage: prog [(-a -b)] - -options: -a - -b", &[], vec!(("-a", Switch(false)), ("-b", Switch(false)))); - -test_user_error!(test_53_testcases, "usage: prog (-a|-b) - -options: -a - -b", &["-a", "-b"]); - -test_user_error!(test_54_testcases, "usage: prog (-a|-b) - -options: -a - -b", &[]); - -test_expect!(test_55_testcases, "usage: prog (-a|-b) - -options: -a - -b", &["-a"], vec!(("-a", Switch(true)), ("-b", Switch(false)))); - -test_expect!(test_56_testcases, "usage: prog (-a|-b) - -options: -a - -b", &["-b"], vec!(("-a", Switch(false)), ("-b", Switch(true)))); - -test_user_error!(test_57_testcases, "usage: prog [ -a | -b ] - -options: -a - -b", &["-a", "-b"]); - -test_expect!(test_58_testcases, "usage: prog [ -a | -b ] - -options: -a - -b", &[], vec!(("-a", 
Switch(false)), ("-b", Switch(false)))); - -test_expect!(test_59_testcases, "usage: prog [ -a | -b ] - -options: -a - -b", &["-a"], vec!(("-a", Switch(true)), ("-b", Switch(false)))); - -test_expect!(test_60_testcases, "usage: prog [ -a | -b ] - -options: -a - -b", &["-b"], vec!(("-a", Switch(false)), ("-b", Switch(true)))); - -test_expect!(test_61_testcases, "usage: prog ", &["10"], vec!(("", Plain(Some("10".to_string()))))); - -test_user_error!(test_62_testcases, "usage: prog ", &["10", "20"]); - -test_user_error!(test_63_testcases, "usage: prog ", &[]); - -test_expect!(test_64_testcases, "usage: prog []", &["10"], vec!(("", Plain(Some("10".to_string()))))); - -test_user_error!(test_65_testcases, "usage: prog []", &["10", "20"]); - -test_expect!(test_66_testcases, "usage: prog []", &[], vec!(("", Plain(None)))); - -test_expect!(test_67_testcases, "usage: prog ", &["10", "20", "40"], vec!(("", Plain(Some("40".to_string()))), ("", Plain(Some("10".to_string()))), ("", Plain(Some("20".to_string()))))); - -test_user_error!(test_68_testcases, "usage: prog ", &["10", "20"]); - -test_user_error!(test_69_testcases, "usage: prog ", &[]); - -test_expect!(test_70_testcases, "usage: prog [ ]", &["10", "20", "40"], vec!(("", Plain(Some("40".to_string()))), ("", Plain(Some("10".to_string()))), ("", Plain(Some("20".to_string()))))); - -test_expect!(test_71_testcases, "usage: prog [ ]", &["10", "20"], vec!(("", Plain(None)), ("", Plain(Some("10".to_string()))), ("", Plain(Some("20".to_string()))))); - -test_user_error!(test_72_testcases, "usage: prog [ ]", &[]); - -test_user_error!(test_73_testcases, "usage: prog [ | ]", &["10", "20", "40"]); - -test_expect!(test_74_testcases, "usage: prog [ | ]", &["20", "40"], vec!(("", Plain(Some("40".to_string()))), ("", Plain(None)), ("", Plain(Some("20".to_string()))))); - -test_expect!(test_75_testcases, "usage: prog [ | ]", &[], vec!(("", Plain(None)), ("", Plain(None)), ("", Plain(None)))); - -test_expect!(test_76_testcases, "usage: prog 
( --all | ) - -options: - --all", &["10", "--all"], vec!(("--all", Switch(true)), ("", Plain(Some("10".to_string()))), ("", Plain(None)))); - -test_expect!(test_77_testcases, "usage: prog ( --all | ) - -options: - --all", &["10"], vec!(("--all", Switch(false)), ("", Plain(None)), ("", Plain(Some("10".to_string()))))); - -test_user_error!(test_78_testcases, "usage: prog ( --all | ) - -options: - --all", &[]); - -test_expect!(test_79_testcases, "usage: prog [ ]", &["10", "20"], vec!(("", List(vec!("10".to_string(), "20".to_string()))))); - -test_expect!(test_80_testcases, "usage: prog [ ]", &["10"], vec!(("", List(vec!("10".to_string()))))); - -test_expect!(test_81_testcases, "usage: prog [ ]", &[], vec!(("", List(vec!())))); - -test_expect!(test_82_testcases, "usage: prog [( )]", &["10", "20"], vec!(("", List(vec!("10".to_string(), "20".to_string()))))); - -test_user_error!(test_83_testcases, "usage: prog [( )]", &["10"]); - -test_expect!(test_84_testcases, "usage: prog [( )]", &[], vec!(("", List(vec!())))); - -test_expect!(test_85_testcases, "usage: prog NAME...", &["10", "20"], vec!(("NAME", List(vec!("10".to_string(), "20".to_string()))))); - -test_expect!(test_86_testcases, "usage: prog NAME...", &["10"], vec!(("NAME", List(vec!("10".to_string()))))); - -test_user_error!(test_87_testcases, "usage: prog NAME...", &[]); - -test_expect!(test_88_testcases, "usage: prog [NAME]...", &["10", "20"], vec!(("NAME", List(vec!("10".to_string(), "20".to_string()))))); - -test_expect!(test_89_testcases, "usage: prog [NAME]...", &["10"], vec!(("NAME", List(vec!("10".to_string()))))); - -test_expect!(test_90_testcases, "usage: prog [NAME]...", &[], vec!(("NAME", List(vec!())))); - -test_expect!(test_91_testcases, "usage: prog [NAME...]", &["10", "20"], vec!(("NAME", List(vec!("10".to_string(), "20".to_string()))))); - -test_expect!(test_92_testcases, "usage: prog [NAME...]", &["10"], vec!(("NAME", List(vec!("10".to_string()))))); - -test_expect!(test_93_testcases, "usage: prog 
[NAME...]", &[], vec!(("NAME", List(vec!())))); - -test_expect!(test_94_testcases, "usage: prog [NAME [NAME ...]]", &["10", "20"], vec!(("NAME", List(vec!("10".to_string(), "20".to_string()))))); - -test_expect!(test_95_testcases, "usage: prog [NAME [NAME ...]]", &["10"], vec!(("NAME", List(vec!("10".to_string()))))); - -test_expect!(test_96_testcases, "usage: prog [NAME [NAME ...]]", &[], vec!(("NAME", List(vec!())))); - -test_expect!(test_97_testcases, "usage: prog (NAME | --foo NAME) - -options: --foo", &["10"], vec!(("NAME", Plain(Some("10".to_string()))), ("--foo", Switch(false)))); - -test_expect!(test_98_testcases, "usage: prog (NAME | --foo NAME) - -options: --foo", &["--foo", "10"], vec!(("NAME", Plain(Some("10".to_string()))), ("--foo", Switch(true)))); - -test_user_error!(test_99_testcases, "usage: prog (NAME | --foo NAME) - -options: --foo", &["--foo=10"]); - -test_expect!(test_100_testcases, "usage: prog (NAME | --foo) [--bar | NAME] - -options: --foo -options: --bar", &["10"], vec!(("--bar", Switch(false)), ("NAME", List(vec!("10".to_string()))), ("--foo", Switch(false)))); - -test_expect!(test_101_testcases, "usage: prog (NAME | --foo) [--bar | NAME] - -options: --foo -options: --bar", &["10", "20"], vec!(("--bar", Switch(false)), ("NAME", List(vec!("10".to_string(), "20".to_string()))), ("--foo", Switch(false)))); - -test_expect!(test_102_testcases, "usage: prog (NAME | --foo) [--bar | NAME] - -options: --foo -options: --bar", &["--foo", "--bar"], vec!(("--bar", Switch(true)), ("NAME", List(vec!())), ("--foo", Switch(true)))); - -test_expect!(test_103_testcases, "Naval Fate. - -Usage: - prog ship new ... - prog ship [] move [--speed=] - prog ship shoot - prog mine (set|remove) [--moored|--drifting] - prog -h | --help - prog --version - -Options: - -h --help Show this screen. - --version Show version. - --speed= Speed in knots [default: 10]. - --moored Mored (anchored) mine. 
- --drifting Drifting mine.", &["ship", "Guardian", "move", "150", "300", "--speed=20"], vec!(("shoot", Switch(false)), ("--moored", Switch(false)), ("--drifting", Switch(false)), ("move", Switch(true)), ("--speed", Plain(Some("20".to_string()))), ("mine", Switch(false)), ("new", Switch(false)), ("--version", Switch(false)), ("set", Switch(false)), ("remove", Switch(false)), ("", List(vec!("Guardian".to_string()))), ("ship", Switch(true)), ("", Plain(Some("150".to_string()))), ("", Plain(Some("300".to_string()))), ("--help", Switch(false)))); - -test_expect!(test_104_testcases, "usage: prog --hello", &["--hello"], vec!(("--hello", Switch(true)))); - -test_expect!(test_105_testcases, "usage: prog [--hello=]", &[], vec!(("--hello", Plain(None)))); - -test_expect!(test_106_testcases, "usage: prog [--hello=]", &["--hello", "wrld"], vec!(("--hello", Plain(Some("wrld".to_string()))))); - -test_expect!(test_107_testcases, "usage: prog [-o]", &[], vec!(("-o", Switch(false)))); - -test_expect!(test_108_testcases, "usage: prog [-o]", &["-o"], vec!(("-o", Switch(true)))); - -test_expect!(test_109_testcases, "usage: prog [-opr]", &["-op"], vec!(("-o", Switch(true)), ("-p", Switch(true)), ("-r", Switch(false)))); - -test_expect!(test_110_testcases, "usage: prog --aabb | --aa", &["--aa"], vec!(("--aa", Switch(true)), ("--aabb", Switch(false)))); - -test_user_error!(test_111_testcases, "usage: prog --aabb | --aa", &["--a"]); - -test_expect!(test_112_testcases, "Usage: prog -v", &["-v"], vec!(("-v", Switch(true)))); - -test_expect!(test_113_testcases, "Usage: prog [-v -v]", &[], vec!(("-v", Counted(0)))); - -test_expect!(test_114_testcases, "Usage: prog [-v -v]", &["-v"], vec!(("-v", Counted(1)))); - -test_expect!(test_115_testcases, "Usage: prog [-v -v]", &["-vv"], vec!(("-v", Counted(2)))); - -test_user_error!(test_116_testcases, "Usage: prog -v ...", &[]); - -test_expect!(test_117_testcases, "Usage: prog -v ...", &["-v"], vec!(("-v", Counted(1)))); - 
-test_expect!(test_118_testcases, "Usage: prog -v ...", &["-vv"], vec!(("-v", Counted(2)))); - -test_expect!(test_119_testcases, "Usage: prog -v ...", &["-vvvvvv"], vec!(("-v", Counted(6)))); - -test_expect!(test_120_testcases, "Usage: prog [-v | -vv | -vvv] - -This one is probably most readable user-friednly variant.", &[], vec!(("-v", Counted(0)))); - -test_expect!(test_121_testcases, "Usage: prog [-v | -vv | -vvv] - -This one is probably most readable user-friednly variant.", &["-v"], vec!(("-v", Counted(1)))); - -test_expect!(test_122_testcases, "Usage: prog [-v | -vv | -vvv] - -This one is probably most readable user-friednly variant.", &["-vv"], vec!(("-v", Counted(2)))); - -test_user_error!(test_123_testcases, "Usage: prog [-v | -vv | -vvv] - -This one is probably most readable user-friednly variant.", &["-vvvv"]); - -test_expect!(test_124_testcases, "usage: prog [--ver --ver]", &["--ver", "--ver"], vec!(("--ver", Counted(2)))); - -test_expect!(test_125_testcases, "usage: prog [go]", &["go"], vec!(("go", Switch(true)))); - -test_expect!(test_126_testcases, "usage: prog [go go]", &[], vec!(("go", Counted(0)))); - -test_expect!(test_127_testcases, "usage: prog [go go]", &["go"], vec!(("go", Counted(1)))); - -test_expect!(test_128_testcases, "usage: prog [go go]", &["go", "go"], vec!(("go", Counted(2)))); - -test_user_error!(test_129_testcases, "usage: prog [go go]", &["go", "go", "go"]); - -test_expect!(test_130_testcases, "usage: prog go...", &["go", "go", "go", "go", "go"], vec!(("go", Counted(5)))); - -test_expect!(test_131_testcases, "usage: prog [options] [-a] - -options: -a - -b", &["-a"], vec!(("-a", Switch(true)), ("-b", Switch(false)))); - -test_user_error!(test_132_testcases, "usage: prog [options] [-a] - -options: -a - -b", &["-aa"]); - -test_expect!(test_133_testcases, "Usage: prog [options] A - -Options: - -q Be quiet - -v Be verbose.", &["arg"], vec!(("A", Plain(Some("arg".to_string()))), ("-v", Switch(false)), ("-q", Switch(false)))); - 
-test_expect!(test_134_testcases, "Usage: prog [options] A - -Options: - -q Be quiet - -v Be verbose.", &["-v", "arg"], vec!(("A", Plain(Some("arg".to_string()))), ("-v", Switch(true)), ("-q", Switch(false)))); - -test_expect!(test_135_testcases, "Usage: prog [options] A - -Options: - -q Be quiet - -v Be verbose.", &["-q", "arg"], vec!(("A", Plain(Some("arg".to_string()))), ("-v", Switch(false)), ("-q", Switch(true)))); - -test_expect!(test_136_testcases, "usage: prog [-]", &["-"], vec!(("-", Switch(true)))); - -test_expect!(test_137_testcases, "usage: prog [-]", &[], vec!(("-", Switch(false)))); - -test_expect!(test_138_testcases, "usage: prog [NAME [NAME ...]]", &["a", "b"], vec!(("NAME", List(vec!("a".to_string(), "b".to_string()))))); - -test_expect!(test_139_testcases, "usage: prog [NAME [NAME ...]]", &[], vec!(("NAME", List(vec!())))); - -test_expect!(test_140_testcases, "usage: prog [options] - -options: - -a Add - -m Message", &["-a"], vec!(("-m", Plain(None)), ("-a", Switch(true)))); - -test_expect!(test_141_testcases, "usage: prog --hello", &["--hello"], vec!(("--hello", Switch(true)))); - -test_expect!(test_142_testcases, "usage: prog [--hello=]", &[], vec!(("--hello", Plain(None)))); - -test_expect!(test_143_testcases, "usage: prog [--hello=]", &["--hello", "wrld"], vec!(("--hello", Plain(Some("wrld".to_string()))))); - -test_expect!(test_144_testcases, "usage: prog [-o]", &[], vec!(("-o", Switch(false)))); - -test_expect!(test_145_testcases, "usage: prog [-o]", &["-o"], vec!(("-o", Switch(true)))); - -test_expect!(test_146_testcases, "usage: prog [-opr]", &["-op"], vec!(("-o", Switch(true)), ("-p", Switch(true)), ("-r", Switch(false)))); - -test_expect!(test_147_testcases, "usage: git [-v | --verbose]", &["-v"], vec!(("-v", Switch(true)), ("--verbose", Switch(false)))); - -test_expect!(test_148_testcases, "usage: git remote [-v | --verbose]", &["remote", "-v"], vec!(("-v", Switch(true)), ("remote", Switch(true)), ("--verbose", Switch(false)))); - 
-test_expect!(test_149_testcases, "usage: prog", &[], vec!()); - -test_expect!(test_150_testcases, "usage: prog - prog ", &["1", "2"], vec!(("", Plain(Some("1".to_string()))), ("", Plain(Some("2".to_string()))))); - -test_expect!(test_151_testcases, "usage: prog - prog ", &[], vec!(("", Plain(None)), ("", Plain(None)))); - -test_expect!(test_152_testcases, "usage: prog - prog", &[], vec!(("", Plain(None)), ("", Plain(None)))); - -test_expect!(test_153_testcases, "usage: prog [--file=]", &[], vec!(("--file", Plain(None)))); - -test_expect!(test_154_testcases, "usage: prog [--file=] - -options: --file ", &[], vec!(("--file", Plain(None)))); - -test_expect!(test_155_testcases, "Usage: prog [-a ] - -Options: -a, --address TCP address [default: localhost:6283].", &[], vec!(("--address", Plain(Some("localhost:6283".to_string()))))); - -test_expect!(test_156_testcases, "usage: prog --long= ...", &["--long", "one"], vec!(("--long", List(vec!("one".to_string()))))); - -test_expect!(test_157_testcases, "usage: prog --long= ...", &["--long", "one", "--long", "two"], vec!(("--long", List(vec!("one".to_string(), "two".to_string()))))); - -test_expect!(test_158_testcases, "usage: prog (go --speed=)...", &["go", "left", "--speed=5", "go", "right", "--speed=9"], vec!(("go", Counted(2)), ("", List(vec!("left".to_string(), "right".to_string()))), ("--speed", List(vec!("5".to_string(), "9".to_string()))))); - -test_expect!(test_159_testcases, "usage: prog [options] -a - -options: -a", &["-a"], vec!(("-a", Switch(true)))); - -test_expect!(test_160_testcases, "usage: prog [-o ]... - -options: -o [default: x]", &["-o", "this", "-o", "that"], vec!(("-o", List(vec!("this".to_string(), "that".to_string()))))); - -test_expect!(test_161_testcases, "usage: prog [-o ]... - -options: -o [default: x]", &[], vec!(("-o", List(vec!("x".to_string()))))); - -test_expect!(test_162_testcases, "usage: prog [-o ]... 
- -options: -o [default: x y]", &["-o", "this"], vec!(("-o", List(vec!("this".to_string()))))); - -test_expect!(test_163_testcases, "usage: prog [-o ]... - -options: -o [default: x y]", &[], vec!(("-o", List(vec!("x".to_string(), "y".to_string()))))); - -test_expect!(test_164_testcases, "usage: prog -pPATH - -options: -p PATH", &["-pHOME"], vec!(("-p", Plain(Some("HOME".to_string()))))); - -test_expect!(test_165_testcases, "Usage: foo (--xx=X|--yy=Y)...", &["--xx=1", "--yy=2"], vec!(("--yy", List(vec!("2".to_string()))), ("--xx", List(vec!("1".to_string()))))); - -test_expect!(test_166_testcases, "usage: prog []", &["f.txt"], vec!(("", Plain(Some("f.txt".to_string()))))); - -test_expect!(test_167_testcases, "usage: prog [--input=]...", &["--input", "a.txt", "--input=b.txt"], vec!(("--input", List(vec!("a.txt".to_string(), "b.txt".to_string()))))); - -test_expect!(test_168_testcases, "usage: prog good [options] - prog fail [options] - -options: --loglevel=N", &["fail", "--loglevel", "5"], vec!(("fail", Switch(true)), ("good", Switch(false)), ("--loglevel", Plain(Some("5".to_string()))))); - -test_expect!(test_169_testcases, "usage:prog --foo", &["--foo"], vec!(("--foo", Switch(true)))); - -test_expect!(test_170_testcases, "PROGRAM USAGE: prog --foo", &["--foo"], vec!(("--foo", Switch(true)))); - -test_expect!(test_171_testcases, "Usage: prog --foo - prog --bar -NOT PART OF SECTION", &["--foo"], vec!(("--bar", Switch(false)), ("--foo", Switch(true)))); - -test_expect!(test_172_testcases, "Usage: - prog --foo - prog --bar - -NOT PART OF SECTION", &["--foo"], vec!(("--bar", Switch(false)), ("--foo", Switch(true)))); - -test_expect!(test_173_testcases, "Usage: - prog --foo - prog --bar -NOT PART OF SECTION", &["--foo"], vec!(("--bar", Switch(false)), ("--foo", Switch(true)))); - -test_expect!(test_174_testcases, "Usage: prog [options] - -global options: --foo -local options: --baz - --bar -other options: - --egg - --spam --not-an-option-", &["--bar", "--egg"], 
vec!(("--bar", Switch(true)), ("--egg", Switch(true)), ("--spam", Switch(false)))); - -test_expect!(test_175_testcases, "Usage: prog [-a] [--] [...]", &["-a"], vec!(("", List(vec!())), ("-a", Switch(true)))); - -test_expect!(test_176_testcases, "Usage: prog [-a] [--] [...]", &["--"], vec!(("", List(vec!())), ("-a", Switch(false)))); - -test_expect!(test_177_testcases, "Usage: prog [-a] [--] [...]", &["-a", "--", "-b"], vec!(("", List(vec!("-b".to_string()))), ("-a", Switch(true)))); - -test_expect!(test_178_testcases, "Usage: prog [-a] [--] [...]", &["-a", "--", "-a"], vec!(("", List(vec!("-a".to_string()))), ("-a", Switch(true)))); - -test_expect!(test_179_testcases, "Usage: prog [-a] [--] [...]", &["--", "-a"], vec!(("", List(vec!("-a".to_string()))), ("-a", Switch(false)))); - -test_expect!(test_180_testcases, "Usage: prog test [options] [--] [...]", &["test", "a", "--", "-b"], vec!(("", List(vec!("a".to_string(), "-b".to_string()))))); - -test_expect!(test_181_testcases, "Usage: prog test [options] [--] [...]", &["test", "--", "-b"], vec!(("", List(vec!("-b".to_string()))))); - -test_user_error!(test_182_testcases, "Usage: prog test [options] [--] [...]", &["test", "a", "-b"]); - -test_expect!(test_183_testcases, "Usage: prog test [options] [--] [...]", &["test", "--", "-b", "--"], vec!(("", List(vec!("-b".to_string(), "--".to_string()))))); - -test_expect!(test_184_testcases, "Usage: prog [options] - -Options: - -a ... Foo", &[], vec!(("-a", Counted(0)))); - -test_expect!(test_185_testcases, "Usage: prog [options] - -Options: - -a ... Foo", &["-a"], vec!(("-a", Counted(1)))); - -test_expect!(test_186_testcases, "Usage: prog [options] - -Options: - -a ... Foo", &["-a", "-a"], vec!(("-a", Counted(2)))); - -test_expect!(test_187_testcases, "Usage: prog [options] - -Options: - -a ... Foo", &["-aa"], vec!(("-a", Counted(2)))); - -test_expect!(test_188_testcases, "Usage: prog [options] - -Options: - -a ... 
Foo", &["-a", "-a", "-a"], vec!(("-a", Counted(3)))); - -test_expect!(test_189_testcases, "Usage: prog [options] - -Options: - -a ... Foo", &["-aaa"], vec!(("-a", Counted(3)))); - -test_expect!(test_190_testcases, "Usage: prog [options] - -Options: - -a, --all ... Foo", &[], vec!(("-a", Counted(0)))); - -test_expect!(test_191_testcases, "Usage: prog [options] - -Options: - -a, --all ... Foo", &["-a"], vec!(("-a", Counted(1)))); - -test_expect!(test_192_testcases, "Usage: prog [options] - -Options: - -a, --all ... Foo", &["-a", "--all"], vec!(("-a", Counted(2)))); - -test_expect!(test_193_testcases, "Usage: prog [options] - -Options: - -a, --all ... Foo", &["-aa", "--all"], vec!(("-a", Counted(3)))); - -test_expect!(test_194_testcases, "Usage: prog [options] - -Options: - -a, --all ... Foo", &["--all"], vec!(("-a", Counted(1)))); - -test_expect!(test_195_testcases, "Usage: prog [options] - -Options: - -a, --all ... Foo", &["--all", "--all"], vec!(("-a", Counted(2)))); - -test_expect!(test_196_testcases, "Usage: prog [options] - -Options: - -a, --all ARG ... Foo", &[], vec!(("-a", List(vec!())))); - -test_expect!(test_197_testcases, "Usage: prog [options] - -Options: - -a, --all ARG ... Foo", &["-a", "1"], vec!(("-a", List(vec!("1".to_string()))))); - -test_expect!(test_198_testcases, "Usage: prog [options] - -Options: - -a, --all ARG ... Foo", &["-a", "2", "--all", "3"], vec!(("-a", List(vec!("2".to_string(), "3".to_string()))))); - -test_expect!(test_199_testcases, "Usage: prog [options] - -Options: - -a, --all ARG ... Foo", &["-a4", "-a5", "--all", "6"], vec!(("-a", List(vec!("4".to_string(), "5".to_string(), "6".to_string()))))); - -test_expect!(test_200_testcases, "Usage: prog [options] - -Options: - -a, --all ARG ... Foo", &["--all", "7"], vec!(("-a", List(vec!("7".to_string()))))); - -test_expect!(test_201_testcases, "Usage: prog [options] - -Options: - -a, --all ARG ... 
Foo", &["--all", "8", "--all", "9"], vec!(("-a", List(vec!("8".to_string(), "9".to_string()))))); - -test_expect!(test_202_testcases, "Usage: prog [options] - -Options: - --all ... Foo", &[], vec!(("--all", Counted(0)))); - -test_expect!(test_203_testcases, "Usage: prog [options] - -Options: - --all ... Foo", &["--all"], vec!(("--all", Counted(1)))); - -test_expect!(test_204_testcases, "Usage: prog [options] - -Options: - --all ... Foo", &["--all", "--all"], vec!(("--all", Counted(2)))); - -test_expect!(test_205_testcases, "Usage: prog [options] - -Options: - --all=ARG ... Foo", &[], vec!(("--all", List(vec!())))); - -test_expect!(test_206_testcases, "Usage: prog [options] - -Options: - --all=ARG ... Foo", &["--all", "1"], vec!(("--all", List(vec!("1".to_string()))))); - -test_expect!(test_207_testcases, "Usage: prog [options] - -Options: - --all=ARG ... Foo", &["--all", "2", "--all", "3"], vec!(("--all", List(vec!("2".to_string(), "3".to_string()))))); - -test_user_error!(test_208_testcases, "Usage: prog [options] - -Options: - --all ... Foo", &["--all", "--all"]); - -test_user_error!(test_209_testcases, "Usage: prog [options] - -Options: - --all ARG ... 
Foo", &["--all", "foo", "--all", "bar"]); - -test_expect!(test_210_testcases, "Usage: prog --speed=ARG", &["--speed", "20"], vec!(("--speed", Plain(Some("20".to_string()))))); - -test_expect!(test_211_testcases, "Usage: prog --speed=ARG", &["--speed=20"], vec!(("--speed", Plain(Some("20".to_string()))))); - -test_expect!(test_212_testcases, "Usage: prog --speed=ARG", &["--speed=-20"], vec!(("--speed", Plain(Some("-20".to_string()))))); - -test_expect!(test_213_testcases, "Usage: prog --speed=ARG", &["--speed", "-20"], vec!(("--speed", Plain(Some("-20".to_string()))))); - -test_expect!(test_214_testcases, "usage: prog [--datetime=] - -options: --datetime= Regex for datetimes [default: ^[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}]", &[], vec!(("--datetime", Plain(Some("^[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}".to_string()))))); - -test_expect!(test_215_testcases, "Usage: prog [options] - -Options: - -x ARG - -y", &["-x-y"], vec!(("-x", Plain(Some("-y".to_string()))))); - diff -Nru cargo-0.17.0/vendor/docopt-0.6.86/src/wordlist.rs cargo-0.19.0/vendor/docopt-0.6.86/src/wordlist.rs --- cargo-0.17.0/vendor/docopt-0.6.86/src/wordlist.rs 2017-03-24 16:59:54.000000000 +0000 +++ cargo-0.19.0/vendor/docopt-0.6.86/src/wordlist.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,106 +0,0 @@ -#[macro_use] -extern crate lazy_static; -extern crate regex; -extern crate rustc_serialize; -extern crate strsim; - -use std::collections::HashMap; -use std::io::{self, Read, Write}; - -use dopt::Docopt; -use parse::{Atom, Parser}; - -// cheat until we get syntax extensions back :-( -macro_rules! regex( - ($s:expr) => (::regex::Regex::new($s).unwrap()); -); - -macro_rules! werr( - ($($arg:tt)*) => ({ - use std::io::{Write, stderr}; - write!(&mut stderr(), $($arg)*).unwrap(); - }) -); - -#[allow(dead_code)] -mod dopt; -#[allow(dead_code)] -mod parse; -#[allow(dead_code)] -mod synonym; - -const USAGE: &'static str = " -Usage: docopt-wordlist [( )] ... 
- -docopt-wordlist prints a list of available flags and commands arguments for the -given usage (provided on stdin). - -Example use: - - your-command --help | docopt-wordlist - -This command also supports completing positional arguments when given a list of -choices. The choices are included in the word list if and only if the argument -name appears in the usage string. For example: - - your-command --help | docopt-wordlist 'arg' 'a b c' - -Which will only include 'a', 'b' and 'c' in the wordlist if -'your-command --help' contains a positional argument named 'arg'. -"; - -#[derive(Debug, RustcDecodable)] -struct Args { - arg_name: Vec, - arg_possibles: Vec, -} - -fn main() { - let args: Args = Docopt::new(USAGE) - .and_then(|d| d.decode()) - .unwrap_or_else(|e| e.exit()); - match run(args) { - Ok(_) => {}, - Err(err) => { - write!(&mut io::stderr(), "{}", err).unwrap(); - ::std::process::exit(1) - } - } -} - -fn run(args: Args) -> Result<(), String> { - let mut usage = String::new(); - try!(io::stdin().read_to_string(&mut usage).map_err(|e| e.to_string())); - let parsed = try!(Parser::new(&usage).map_err(|e| e.to_string())); - let arg_possibles: HashMap> = - args.arg_name.iter() - .zip(args.arg_possibles.iter()) - .map(|(name, possibles)| { - let choices = - regex!(r"[ \t]+").split(&**possibles) - .map(|s| s.to_string()) - .collect::>(); - (name.clone(), choices) - }) - .collect(); - - let mut words = vec![]; - for k in parsed.descs.keys() { - if let Atom::Positional(ref arg_name) = *k { - if let Some(choices) = arg_possibles.get(arg_name) { - words.extend(choices.iter().map(|s| s.clone())); - } - // If the user hasn't given choices for this positional argument, - // then there's really nothing to complete here. - } else { - words.push(k.to_string()); - } - } - for (k, _) in parsed.descs.synonyms() { - // We don't need to do anything special here since synonyms can - // only be flags, which we always include in the wordlist. 
- words.push(k.to_string()); - } - println!("{}", words.join(" ")); - Ok(()) -} diff -Nru cargo-0.17.0/vendor/docopt-0.6.86/.travis.yml cargo-0.19.0/vendor/docopt-0.6.86/.travis.yml --- cargo-0.17.0/vendor/docopt-0.6.86/.travis.yml 2017-03-24 16:59:54.000000000 +0000 +++ cargo-0.19.0/vendor/docopt-0.6.86/.travis.yml 1970-01-01 00:00:00.000000000 +0000 @@ -1,14 +0,0 @@ -language: rust -rust: - - stable - - beta - - nightly -script: - - cargo build --verbose - - cargo test --verbose - - cargo doc - - if [ "$TRAVIS_RUST_VERSION" = "nightly" ]; then - cd docopt_macros; - cargo build --verbose; - cargo test --verbose; - fi diff -Nru cargo-0.17.0/vendor/docopt-0.6.86/UNLICENSE cargo-0.19.0/vendor/docopt-0.6.86/UNLICENSE --- cargo-0.17.0/vendor/docopt-0.6.86/UNLICENSE 2017-03-24 16:59:54.000000000 +0000 +++ cargo-0.19.0/vendor/docopt-0.6.86/UNLICENSE 1970-01-01 00:00:00.000000000 +0000 @@ -1,24 +0,0 @@ -This is free and unencumbered software released into the public domain. - -Anyone is free to copy, modify, publish, use, compile, sell, or -distribute this software, either in source code form or as a compiled -binary, for any purpose, commercial or non-commercial, and by any -means. - -In jurisdictions that recognize copyright laws, the author or authors -of this software dedicate any and all copyright interest in the -software to the public domain. We make this dedication for the benefit -of the public at large and to the detriment of our heirs and -successors. We intend this dedication to be an overt act of -relinquishment in perpetuity of all present and future rights to this -software under copyright law. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
-IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR -OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, -ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR -OTHER DEALINGS IN THE SOFTWARE. - -For more information, please refer to diff -Nru cargo-0.17.0/vendor/docopt-0.7.0/.cargo-checksum.json cargo-0.19.0/vendor/docopt-0.7.0/.cargo-checksum.json --- cargo-0.17.0/vendor/docopt-0.7.0/.cargo-checksum.json 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/docopt-0.7.0/.cargo-checksum.json 2017-08-16 09:07:20.000000000 +0000 @@ -0,0 +1 @@ +{"files":{".cargo-ok":"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",".gitignore":"4c1232061d048af6e883c43903fb9a75d8596560336ffd98ead534ef832d3ad7",".travis.yml":"48d991c260cc382d111df9be80e69a13a4896efc6ebad6275bd58b8c154287d5","COPYING":"01c266bced4a434da0051174d6bee16a4c82cf634e2679b6155d40d75012390f","Cargo.toml":"663077949b16d2557d3b99bf98221075202704d95133cd51138c9bce633a7f59","LICENSE-MIT":"0f96a83840e146e43c0ec96a22ec1f392e0680e6c1226e6f3ba87e0740af850f","Makefile":"db1787c5c7d2daea87d92c0549976a18bbe0601acb2ab5bd8dc5edb9f2b46e63","README.md":"ab86cce5636418ce23068c2a2377fd8645f5446554f2464e8ddab364345b0735","UNLICENSE":"7e12e5df4bae12cb21581ba157ced20e1986a0508dd10d0e8a4ab9a4cf94e85c","completions/docopt-wordlist.bash":"213bf1baea244eeb32af3a24a9ad895212cb538e3cdaee3bfed842b11a2a64d8","ctags.rust":"3d128d3cc59f702e68953ba2fe6c3f46bc6991fc575308db060482d5da0c79f3","examples/cargo.rs":"7f4b8a06244f8e82b94bb659c0346a258499b73aa5c76120261c7ebbfaaa3eb0","examples/cp.rs":"4820c6be6d60714927c8cfd9707b60ebe60b90a0df11cc71f342f4578352f366","examples/decode.rs":"2e5d713f977b7626015fc6e7049039a4f727379b8a5055a40c8d0871983a3044","examples/hashmap.rs":"9066a7b7192e15b3b667702519645d31926a371bc54ab8d70b211d98458d5a8d","examples/optional_command.rs":"eefe58a4ea192bc87262743785be40e9d38de2185b0d7fa8e3a9ac2386cb3d64","examples/verbose_multiple.rs":"47240d86b0a6
e69b8156f59dfced062dad1cc418e9de50be85bd8c7ed036cfb9","scripts/mk-testcases":"649f37d391650175c8462171f7a98fce81735c9317630a5eb13db532ddb22976","session.vim":"1d51566b00f8ff2021d56948c1c55f123959f3e24879a6ad9337eccb11fc8fe9","src/dopt.rs":"74b9596e4b84fa7ac6b7a854fd339ddbd60850c9125eb6464c54cc8372b8d651","src/lib.rs":"89e2b734ed5043010b5f2d9f5e26b1376192bd053bcb32dedaf786a7b0e94da2","src/parse.rs":"786ef08bdaea5bf2a978274012856f6485155118201485f0b4970b37a6e75647","src/synonym.rs":"5eeec443a7df8549a31c1510fbeaeadde577d899cd651578cd7145835656b4a6","src/test/mod.rs":"38096547cc5cd6a02008e61e82a2ebfd08c809ca3b5054e17f4c1a3455dc7ccc","src/test/suggestions.rs":"51e044db856a424ef12d2bc2eb541ae922b93d81ac5548767c9c638ccd87d388","src/test/testcases.docopt":"13fcd2948a5625b76f93b98ac7b6cb53ef70c119fc2c5f85d2cb67e56bd4e9c3","src/test/testcases.rs":"cbecfab0c82249a7d8ad193ad5e9e10f45a7a41b37e69cfc025a9cdc6c213f04","src/wordlist.rs":"ef501dae8e2c8ff71f6abe6c63b6406c371bd2cc84955954d86b265203c0652a"},"package":"ab32ea6e284d87987066f21a9e809a73c14720571ef34516f0890b3d355ccfd8"} \ No newline at end of file diff -Nru cargo-0.17.0/vendor/docopt-0.7.0/Cargo.toml cargo-0.19.0/vendor/docopt-0.7.0/Cargo.toml --- cargo-0.17.0/vendor/docopt-0.7.0/Cargo.toml 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/docopt-0.7.0/Cargo.toml 2017-08-16 09:07:20.000000000 +0000 @@ -0,0 +1,26 @@ +[package] +name = "docopt" +version = "0.7.0" #:version +authors = ["Andrew Gallant "] +description = "Command line argument parsing." 
+documentation = "http://burntsushi.net/rustdoc/docopt/" +homepage = "https://github.com/docopt/docopt.rs" +repository = "https://github.com/docopt/docopt.rs" +readme = "README.md" +keywords = ["docopt", "argument", "command", "argv"] +license = "Unlicense/MIT" + +[lib] +name = "docopt" + +[[bin]] +name = "docopt-wordlist" +path = "src/wordlist.rs" +doc = false +test = false + +[dependencies] +lazy_static = "0.2" +regex = "0.2" +rustc-serialize = "0.3" +strsim = "0.6" diff -Nru cargo-0.17.0/vendor/docopt-0.7.0/completions/docopt-wordlist.bash cargo-0.19.0/vendor/docopt-0.7.0/completions/docopt-wordlist.bash --- cargo-0.17.0/vendor/docopt-0.7.0/completions/docopt-wordlist.bash 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/docopt-0.7.0/completions/docopt-wordlist.bash 2017-08-16 09:07:20.000000000 +0000 @@ -0,0 +1,79 @@ +# This is your basic tab completion that will work well with commands that +# have only one usage (i.e., no distinct sub-commands). +# +# Completion works by simply taking the command name and running `$cmd --help` +# to get the usage (which is then parsed for possible completions). +function _docopt_wordlist { + if [ -z "$DOCOPT_WORDLIST_BIN" ]; then + DOCOPT_WORDLIST_BIN=/usr/local/bin/docopt-wordlist + fi + + cword=$(_get_cword) + cmd="${COMP_WORDS[0]}" + wordlist=$("$cmd" --help 2>&1 | "$DOCOPT_WORDLIST_BIN") + gen "$cword" "$wordlist" +} + +# This is a fancier version of the above that supports commands that have +# multiple sub-commands (i.e., distinct usages like Cargo). +# +# This supports sub-command completion only if `$cmd --list` shows a list of +# available sub-commands. +# +# Otherwise, the usage for the command `a b c d` is taken from the first +# command that exits successfully: +# +# a b c d --help +# a b c --help +# a b --help +# a --help +# +# So for example, if you've typed `cargo test --jo`, then the following +# happens: +# +# cargo test --jo --help # error +# cargo test --help # gives 'test' sub-command usage! 
+# +# As a special case, if only the initial command has been typed, then the +# sub-commands (taken from `$cmd --list`) are added to the wordlist. +function _docopt_wordlist_commands { + if [ -z "$DOCOPT_WORDLIST_BIN" ]; then + DOCOPT_WORDLIST_BIN=/usr/local/bin/docopt-wordlist + fi + + cword=$(_get_cword) + if [ "$COMP_CWORD" = 1 ]; then + cmd="${COMP_WORDS[0]}" + wordlist=$("$cmd" --help 2>&1 | "$DOCOPT_WORDLIST_BIN") + wordlist+=" $("$cmd" --list | egrep '^ +\w' | awk '{print $1}')" + gen "$cword" "$wordlist" + else + for ((i="$COMP_CWORD"; i >= 1; i++)); do + cmd="${COMP_WORDS[@]::$i}" + wordlist=$($cmd --help 2>&1 | "$DOCOPT_WORDLIST_BIN") + if [ $? = 0 ]; then + gen "$cword" "$wordlist" + break + fi + done + fi +} + +# A helper function for running `compgen`, which is responsible for taking +# a prefix and presenting possible completions. +# +# If the current prefix starts with a `.` or a `/`, then file/directory +# completion is done. Otherwise, Docopt completion is done. If Docopt +# completion is empty, then it falls back to file/directory completion. +function gen { + cword="$1" + wordlist="$2" + if [[ "$cword" = .* || "$cword" = /* ]]; then + COMPREPLY=($(compgen -A file -- "$cword")) + else + COMPREPLY=($(compgen -W "$wordlist" -- "$cword")) + if [ -z "$COMPREPLY" ]; then + COMPREPLY=($(compgen -A file -- "$cword")) + fi + fi +} diff -Nru cargo-0.17.0/vendor/docopt-0.7.0/COPYING cargo-0.19.0/vendor/docopt-0.7.0/COPYING --- cargo-0.17.0/vendor/docopt-0.7.0/COPYING 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/docopt-0.7.0/COPYING 2017-08-16 09:07:20.000000000 +0000 @@ -0,0 +1,3 @@ +This project is dual-licensed under the Unlicense and MIT licenses. + +You may use this code under the terms of either license. 
diff -Nru cargo-0.17.0/vendor/docopt-0.7.0/ctags.rust cargo-0.19.0/vendor/docopt-0.7.0/ctags.rust --- cargo-0.17.0/vendor/docopt-0.7.0/ctags.rust 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/docopt-0.7.0/ctags.rust 2017-08-16 09:07:20.000000000 +0000 @@ -0,0 +1,11 @@ +--langdef=Rust +--langmap=Rust:.rs +--regex-Rust=/^[ \t]*(#\[[^\]]\][ \t]*)*(pub[ \t]+)?(extern[ \t]+)?("[^"]+"[ \t]+)?(unsafe[ \t]+)?fn[ \t]+([a-zA-Z0-9_]+)/\6/f,functions,function definitions/ +--regex-Rust=/^[ \t]*(pub[ \t]+)?type[ \t]+([a-zA-Z0-9_]+)/\2/T,types,type definitions/ +--regex-Rust=/^[ \t]*(pub[ \t]+)?enum[ \t]+([a-zA-Z0-9_]+)/\2/g,enum,enumeration names/ +--regex-Rust=/^[ \t]*(pub[ \t]+)?struct[ \t]+([a-zA-Z0-9_]+)/\2/s,structure names/ +--regex-Rust=/^[ \t]*(pub[ \t]+)?mod[ \t]+([a-zA-Z0-9_]+)/\2/m,modules,module names/ +--regex-Rust=/^[ \t]*(pub[ \t]+)?static[ \t]+([a-zA-Z0-9_]+)/\2/c,consts,static constants/ +--regex-Rust=/^[ \t]*(pub[ \t]+)?trait[ \t]+([a-zA-Z0-9_]+)/\2/t,traits,traits/ +--regex-Rust=/^[ \t]*(pub[ \t]+)?impl([ \t\n]+<.*>)?[ \t]+([a-zA-Z0-9_]+)/\3/i,impls,trait implementations/ +--regex-Rust=/^[ \t]*macro_rules![ \t]+([a-zA-Z0-9_]+)/\1/d,macros,macro definitions/ diff -Nru cargo-0.17.0/vendor/docopt-0.7.0/examples/cargo.rs cargo-0.19.0/vendor/docopt-0.7.0/examples/cargo.rs --- cargo-0.17.0/vendor/docopt-0.7.0/examples/cargo.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/docopt-0.7.0/examples/cargo.rs 2017-08-16 09:07:20.000000000 +0000 @@ -0,0 +1,51 @@ +extern crate rustc_serialize; +extern crate docopt; + +use docopt::Docopt; + +// Write the Docopt usage string. +const USAGE: &'static str = " +Rust's package manager + +Usage: + cargo [...] 
+ cargo [options] + +Options: + -h, --help Display this message + -V, --version Print version info and exit + --list List installed commands + -v, --verbose Use verbose output + +Some common cargo commands are: + build Compile the current project + clean Remove the target directory + doc Build this project's and its dependencies' documentation + new Create a new cargo project + run Build and execute src/main.rs + test Run the tests + bench Run the benchmarks + update Update dependencies listed in Cargo.lock + +See 'cargo help ' for more information on a specific command. +"; + +#[derive(Debug, RustcDecodable)] +struct Args { + arg_command: Option, + arg_args: Vec, + flag_list: bool, + flag_verbose: bool, +} + +#[derive(Debug, RustcDecodable)] +enum Command { + Build, Clean, Doc, New, Run, Test, Bench, Update, +} + +fn main() { + let args: Args = Docopt::new(USAGE) + .and_then(|d| d.options_first(true).decode()) + .unwrap_or_else(|e| e.exit()); + println!("{:?}", args); +} diff -Nru cargo-0.17.0/vendor/docopt-0.7.0/examples/cp.rs cargo-0.19.0/vendor/docopt-0.7.0/examples/cp.rs --- cargo-0.17.0/vendor/docopt-0.7.0/examples/cp.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/docopt-0.7.0/examples/cp.rs 2017-08-16 09:07:20.000000000 +0000 @@ -0,0 +1,28 @@ +extern crate rustc_serialize; +extern crate docopt; + +use docopt::Docopt; + +// Write the Docopt usage string. +const USAGE: &'static str = " +Usage: cp [-a] + cp [-a] ... + +Options: + -a, --archive Copy everything. 
+"; + +#[derive(Debug, RustcDecodable)] +struct Args { + arg_source: Vec, + arg_dest: String, + arg_dir: String, + flag_archive: bool, +} + +fn main() { + let args: Args = Docopt::new(USAGE) + .and_then(|d| d.decode()) + .unwrap_or_else(|e| e.exit()); + println!("{:?}", args); +} diff -Nru cargo-0.17.0/vendor/docopt-0.7.0/examples/decode.rs cargo-0.19.0/vendor/docopt-0.7.0/examples/decode.rs --- cargo-0.17.0/vendor/docopt-0.7.0/examples/decode.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/docopt-0.7.0/examples/decode.rs 2017-08-16 09:07:20.000000000 +0000 @@ -0,0 +1,47 @@ +extern crate rustc_serialize; +extern crate docopt; + +use docopt::Docopt; + +const USAGE: &'static str = " +Naval Fate. + +Usage: + naval_fate.py ship new ... + naval_fate.py ship move [--speed=] + naval_fate.py ship shoot + naval_fate.py mine (set|remove) [--moored | --drifting] + naval_fate.py (-h | --help) + naval_fate.py --version + +Options: + -h --help Show this screen. + --version Show version. + --speed= Speed in knots [default: 10]. + --moored Moored (anchored) mine. + --drifting Drifting mine. +"; + +#[derive(Debug, RustcDecodable)] +struct Args { + flag_speed: isize, + flag_drifting: bool, + arg_name: Vec, + arg_x: Option, + arg_y: Option, + cmd_ship: bool, + cmd_mine: bool, +} + +fn main() { + let args: Args = Docopt::new(USAGE) + .and_then(|d| d.decode()) + .unwrap_or_else(|e| e.exit()); + println!("{:?}", args); + + println!("\nSome values:"); + println!(" Speed: {}", args.flag_speed); + println!(" Drifting? {}", args.flag_drifting); + println!(" Names: {:?}", args.arg_name); + println!(" Command 'ship' invoked? 
{:?}", args.cmd_ship); +} diff -Nru cargo-0.17.0/vendor/docopt-0.7.0/examples/hashmap.rs cargo-0.19.0/vendor/docopt-0.7.0/examples/hashmap.rs --- cargo-0.17.0/vendor/docopt-0.7.0/examples/hashmap.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/docopt-0.7.0/examples/hashmap.rs 2017-08-16 09:07:20.000000000 +0000 @@ -0,0 +1,39 @@ +extern crate docopt; + +use docopt::Docopt; + +const USAGE: &'static str = " +Naval Fate. + +Usage: + naval_fate.py ship new ... + naval_fate.py ship move [--speed=] + naval_fate.py ship shoot + naval_fate.py mine (set|remove) [--moored | --drifting] + naval_fate.py (-h | --help) + naval_fate.py --version + +Options: + -h --help Show this screen. + --version Show version. + --speed= Speed in knots [default: 10]. + --moored Moored (anchored) mine. + --drifting Drifting mine. +"; + +fn main() { + let version = "1.2.3".to_owned(); + let args = Docopt::new(USAGE) + .and_then(|dopt| dopt.version(Some(version)).parse()) + .unwrap_or_else(|e| e.exit()); + println!("{:?}", args); + + // You can conveniently access values with `get_{bool,count,str,vec}` + // functions. If the key doesn't exist (or if, e.g., you use `get_str` on + // a switch), then a sensible default value is returned. + println!("\nSome values:"); + println!(" Speed: {}", args.get_str("--speed")); + println!(" Drifting? {}", args.get_bool("--drifting")); + println!(" Names: {:?}", args.get_vec("")); + println!(" Command 'ship' invoked? {:?}", args.get_bool("ship")); +} diff -Nru cargo-0.17.0/vendor/docopt-0.7.0/examples/optional_command.rs cargo-0.19.0/vendor/docopt-0.7.0/examples/optional_command.rs --- cargo-0.17.0/vendor/docopt-0.7.0/examples/optional_command.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/docopt-0.7.0/examples/optional_command.rs 2017-08-16 09:07:20.000000000 +0000 @@ -0,0 +1,50 @@ +// This example shows how to implement a command with a "catch all." 
+// +// This requires writing your own impl for `Decodable` because docopt's +// decoder uses `Option` to mean "T may not be present" rather than +// "T may be present but incorrect." + +extern crate rustc_serialize; +extern crate docopt; + +use docopt::Docopt; +use rustc_serialize::{Decodable, Decoder}; + +// Write the Docopt usage string. +const USAGE: &'static str = " +Rust's package manager + +Usage: + mycli [] + +Options: + -h, --help Display this message +"; + +#[derive(Debug, RustcDecodable)] +struct Args { + arg_command: Command, +} + +impl Decodable for Command { + fn decode(d: &mut D) -> Result { + let s = try!(d.read_str()); + Ok(match &*s { + "" => Command::None, + "A" => Command::A, + "B" => Command::B, + "C" => Command::C, + s => Command::Unknown(s.to_string()), + }) + } +} + +#[derive(Debug)] +enum Command { A, B, C, Unknown(String), None } + +fn main() { + let args: Args = Docopt::new(USAGE) + .and_then(|d| d.decode()) + .unwrap_or_else(|e| e.exit()); + println!("{:?}", args); +} diff -Nru cargo-0.17.0/vendor/docopt-0.7.0/examples/verbose_multiple.rs cargo-0.19.0/vendor/docopt-0.7.0/examples/verbose_multiple.rs --- cargo-0.17.0/vendor/docopt-0.7.0/examples/verbose_multiple.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/docopt-0.7.0/examples/verbose_multiple.rs 2017-08-16 09:07:20.000000000 +0000 @@ -0,0 +1,36 @@ +extern crate rustc_serialize; +extern crate docopt; + +use docopt::Docopt; + +// This shows how to implement multiple levels of verbosity. +// +// When you have multiple patterns, I think the only way to carry the +// repeated flag through all of them is to specify it for each pattern +// explicitly. +// +// This is unfortunate. +const USAGE: &'static str = " +Usage: cp [options] [-v | -vv | -vvv] + cp [options] [-v | -vv | -vvv] ... + +Options: + -a, --archive Copy everything. + -v, --verbose Show extra log output. 
+"; + +#[derive(Debug, RustcDecodable)] +struct Args { + arg_source: Vec, + arg_dest: String, + arg_dir: String, + flag_archive: bool, + flag_verbose: usize, +} + +fn main() { + let args: Args = Docopt::new(USAGE) + .and_then(|d| d.decode()) + .unwrap_or_else(|e| e.exit()); + println!("{:?}", args); +} diff -Nru cargo-0.17.0/vendor/docopt-0.7.0/.gitignore cargo-0.19.0/vendor/docopt-0.7.0/.gitignore --- cargo-0.17.0/vendor/docopt-0.7.0/.gitignore 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/docopt-0.7.0/.gitignore 2017-08-16 09:07:20.000000000 +0000 @@ -0,0 +1,6 @@ +.*.swp +doc +tags +target +scratch* +Cargo.lock diff -Nru cargo-0.17.0/vendor/docopt-0.7.0/LICENSE-MIT cargo-0.19.0/vendor/docopt-0.7.0/LICENSE-MIT --- cargo-0.17.0/vendor/docopt-0.7.0/LICENSE-MIT 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/docopt-0.7.0/LICENSE-MIT 2017-08-16 09:07:20.000000000 +0000 @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2015 Andrew Gallant + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff -Nru cargo-0.17.0/vendor/docopt-0.7.0/Makefile cargo-0.19.0/vendor/docopt-0.7.0/Makefile --- cargo-0.17.0/vendor/docopt-0.7.0/Makefile 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/docopt-0.7.0/Makefile 2017-08-16 09:07:20.000000000 +0000 @@ -0,0 +1,18 @@ +all: + @echo Nothing to do + +docs: $(LIB_FILES) + cargo doc + # WTF is rustdoc doing? + in-dir ./target/doc fix-perms + rscp ./target/doc/* gopher:~/www/burntsushi.net/rustdoc/ + +src/test/testcases.rs: src/test/testcases.docopt scripts/mk-testcases + ./scripts/mk-testcases ./src/test/testcases.docopt > ./src/test/testcases.rs + +ctags: + ctags --recurse --options=ctags.rust --languages=Rust + +push: + git push github master + git push origin master diff -Nru cargo-0.17.0/vendor/docopt-0.7.0/README.md cargo-0.19.0/vendor/docopt-0.7.0/README.md --- cargo-0.17.0/vendor/docopt-0.7.0/README.md 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/docopt-0.7.0/README.md 2017-08-16 09:07:20.000000000 +0000 @@ -0,0 +1,343 @@ +Docopt for Rust with automatic type based decoding (i.e., data validation). +This implementation conforms to the +[official description of Docopt](http://docopt.org/) and +[passes its test suite](https://github.com/docopt/docopt/pull/201). + +[![Build status](https://api.travis-ci.org/docopt/docopt.rs.svg)](https://travis-ci.org/docopt/docopt.rs) +[![](http://meritbadge.herokuapp.com/docopt)](https://crates.io/crates/docopt) + +Dual-licensed under MIT or the [UNLICENSE](http://unlicense.org). + + +### Current status + +Fully functional but the design of the API is up for debate. **I am seeking +feedback**. 
+ + +### Documentation + +[http://burntsushi.net/rustdoc/docopt](http://burntsushi.net/rustdoc/docopt/index.html) + + +### Installation + +This crate is fully compatible with Cargo. Just add it to your `Cargo.toml`: + +```toml +[dependencies] +docopt = "0.6" +rustc-serialize = "0.3" # if you're using `derive(RustcDecodable)` +``` + +If you want to use the macro, then add `docopt_macros = "0.6"` instead. +Note that the **`docopt!` macro only works on a nightly Rust compiler** because +it is a compiler plugin. + + +### Quick example + +Here is a full working example. Notice that you can specify the types of each +of the named values in the Docopt usage string. Values will be automatically +converted to those types (or an error will be reported). + +```rust +extern crate rustc_serialize; +extern crate docopt; + +use docopt::Docopt; + +const USAGE: &'static str = " +Naval Fate. + +Usage: + naval_fate.py ship new ... + naval_fate.py ship move [--speed=] + naval_fate.py ship shoot + naval_fate.py mine (set|remove) [--moored | --drifting] + naval_fate.py (-h | --help) + naval_fate.py --version + +Options: + -h --help Show this screen. + --version Show version. + --speed= Speed in knots [default: 10]. + --moored Moored (anchored) mine. + --drifting Drifting mine. +"; + +#[derive(Debug, RustcDecodable)] +struct Args { + flag_speed: isize, + flag_drifting: bool, + arg_name: Vec, + arg_x: Option, + arg_y: Option, + cmd_ship: bool, + cmd_mine: bool, +} + +fn main() { + let args: Args = Docopt::new(USAGE) + .and_then(|d| d.decode()) + .unwrap_or_else(|e| e.exit()); + println!("{:?}", args); +} +``` + +Here is the same example, but with the use of the `docopt!` macro, which will +*generate a struct for you*. Note that this uses a compiler plugin, so it only +works on a **nightly Rust compiler**: + +```rust +#![feature(plugin)] +#![plugin(docopt_macros)] + +extern crate rustc_serialize; +extern crate docopt; + +use docopt::Docopt; + +docopt!(Args derive Debug, " +Naval Fate. 
+ +Usage: + naval_fate.py ship new ... + naval_fate.py ship move [--speed=] + naval_fate.py ship shoot + naval_fate.py mine (set|remove) [--moored | --drifting] + naval_fate.py (-h | --help) + naval_fate.py --version + +Options: + -h --help Show this screen. + --version Show version. + --speed= Speed in knots [default: 10]. + --moored Moored (anchored) mine. + --drifting Drifting mine. +"); + +fn main() { + let args: Args = Args::docopt().decode().unwrap_or_else(|e| e.exit()); + println!("{:?}", args); +} +``` + +The `Args` struct has one static method defined for it: `docopt`. The method +returns a normal `Docopt` value, which can be used to set configuration +options, `argv` and parse or decode command line arguments. + + +### Struct field name mapping + +The field names of the struct map like this: + +``` +-g => flag_g +--group => flag_group +--group => flag_group +FILE => arg_FILE + => arg_file +build => cmd_build +``` + + +### Data validation example + +Here's another example that shows how to specify the types of your arguments: + +```rust +#![feature(plugin)] +#![plugin(docopt_macros)] + +extern crate rustc_serialize; + +extern crate docopt; + +docopt!(Args, "Usage: add ", arg_x: i32, arg_y: i32); + +fn main() { + let args: Args = Args::docopt().decode().unwrap_or_else(|e| e.exit()); + println!("x: {}, y: {}", args.arg_x, args.arg_y); +} +``` + +In this example, specific type annotations were added. They will be +automatically inserted into the generated struct. You can override as many (or +as few) fields as you want. If you don't specify a type, then one of `bool`, +`u64`, `String` or `Vec` will be chosen depending on the type of +argument. In this case, both `arg_x` and `arg_y` would have been `String`. + +If any value cannot be decoded into a value with the right type, then an error +will be shown to the user. + +And of course, you don't need the macro to do this. You can do the same thing +with a manually written struct too. 
+ + +### Modeling `rustc` + +Here's a selected subset for some of `rustc`'s options. This also shows how to +restrict values to a list of choices via an `enum` type and demonstrates more +Docopt features. + +```rust +#![feature(plugin)] +#![plugin(docopt_macros)] + +extern crate rustc_serialize; + +extern crate docopt; + +docopt!(Args derive Debug, " +Usage: rustc [options] [--cfg SPEC... -L PATH...] INPUT + rustc (--help | --version) + +Options: + -h, --help Show this message. + --version Show the version of rustc. + --cfg SPEC Configure the compilation environment. + -L PATH Add a directory to the library search path. + --emit TYPE Configure the output that rustc will produce. + Valid values: asm, ir, bc, obj, link. + --opt-level LEVEL Optimize with possible levels 0-3. +", flag_opt_level: Option, flag_emit: Option); + +#[derive(RustcDecodable, Debug)] +enum Emit { Asm, Ir, Bc, Obj, Link } + +#[derive(Debug)] +enum OptLevel { Zero, One, Two, Three } + +impl rustc_serialize::Decodable for OptLevel { + fn decode(d: &mut D) -> Result { + Ok(match try!(d.read_usize()) { + 0 => OptLevel::Zero, 1 => OptLevel::One, + 2 => OptLevel::Two, 3 => OptLevel::Three, + n => { + let err = format!("Could not decode '{}' as opt-level.", n); + return Err(d.error(&*err)); + } + }) + } +} + +fn main() { + let args: Args = Args::docopt().decode().unwrap_or_else(|e| e.exit()); + println!("{:?}", args); +} +``` + +### Viewing the generated struct + +Generating a struct is pretty magical, but if you want, you can look at it by +expanding all macros. Say you wrote the above example for `Usage: add ` +into a file called `add.rs`. Then running: + +```bash +rustc -L path/containing/docopt/lib -Z unstable-options --pretty=expanded add.rs +``` + +Will show all macros expanded. The `path/containing/docopt/lib` is usually +`target/debug/deps` or `target/release/deps` in a cargo project. 
In the generated code, you should be +able to find the generated struct: + +```rust +struct Args { + pub arg_x: int, + pub arg_y: int, +} +``` + + +### Traditional Docopt API + +The reference implementation of Docopt returns a Python dictionary with names +like `` or `--flag`. If you prefer this access pattern, then you can use +`docopt::ArgvMap`. The disadvantage is that you have to do all of your type +conversion manually. Here's the canonical Docopt example with a hash table: + +```rust +extern crate docopt; + +use docopt::Docopt; + +const USAGE: &'static str = " +Naval Fate. + +Usage: + naval_fate.py ship new ... + naval_fate.py ship move [--speed=] + naval_fate.py ship shoot + naval_fate.py mine (set|remove) [--moored | --drifting] + naval_fate.py (-h | --help) + naval_fate.py --version + +Options: + -h --help Show this screen. + --version Show version. + --speed= Speed in knots [default: 10]. + --moored Moored (anchored) mine. + --drifting Drifting mine. +"; + +fn main() { + let args = Docopt::new(USAGE) + .and_then(|dopt| dopt.parse()) + .unwrap_or_else(|e| e.exit()); + println!("{:?}", args); + + // You can conveniently access values with `get_{bool,count,str,vec}` + // functions. If the key doesn't exist (or if, e.g., you use `get_str` on + // a switch), then a sensible default value is returned. + println!("\nSome values:"); + println!(" Speed: {}", args.get_str("--speed")); + println!(" Drifting? {}", args.get_bool("--drifting")); + println!(" Names: {:?}", args.get_vec("")); +} +``` + +### Tab completion support + +This particular implementation bundles a command called `docopt-wordlist` that +can be used to automate tab completion. This repository also collects some +basic completion support for various shells (currently only bash) in the +`completions` directory. + +You can use them to setup tab completion on your system. It should work with +any program that uses Docopt (or rather, any program that outputs usage +messages that look like Docopt). 
For example, to get tab completion support for +Cargo, you'll have to install `docopt-wordlist` and add some voodoo to your +`$HOME/.bash_completion` file (this may vary for other shells). + +Here it is step by step: + +```bash +# Download and build `docopt-wordlist` (as part of the Docopt package) +$ git clone git://github.com/docopt/docopt.rs +$ cd docopt.rs +$ cargo build --release + +# Now setup tab completion (for bash) +$ echo "DOCOPT_WORDLIST_BIN=\"$(pwd)/target/release/docopt-wordlist\"" >> $HOME/.bash_completion +$ echo "source \"$(pwd)/completions/docopt-wordlist.bash\"" >> $HOME/.bash_completion +$ echo "complete -F _docopt_wordlist_commands cargo" >> $HOME/.bash_completion +``` + +My [CSV toolkit](https://github.com/BurntSushi/xsv) is supported too: + +```bash +# shameless plug... +$ echo "complete -F _docopt_wordlist_commands xsv" >> $HOME/.bash_completion +``` + +Note that this is emphatically a first pass. There are several improvements +that I'd like to make: + +1. Take context into account when completing. For example, it should be + possible to only show completions that can lead to a valid Docopt match. + This may be hard. (i.e., It may require restructuring Docopt's internals.) +2. Support more shells. (I'll happily accept pull requests on this one. I doubt + I'll venture outside of bash any time soon.) +3. Make tab completion support more seamless. The way it works right now is + pretty hacky by intermingling file/directory completion. 
diff -Nru cargo-0.17.0/vendor/docopt-0.7.0/scripts/mk-testcases cargo-0.19.0/vendor/docopt-0.7.0/scripts/mk-testcases --- cargo-0.17.0/vendor/docopt-0.7.0/scripts/mk-testcases 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/docopt-0.7.0/scripts/mk-testcases 2017-08-16 09:07:20.000000000 +0000 @@ -0,0 +1,80 @@ +#!/usr/bin/env python2 + +from __future__ import absolute_import, division, print_function +import argparse +import json +import re + +retests = re.compile('(.*?)"""(.*?)(r"""|\s*$)', re.DOTALL) +reinvokes = re.compile('(.+?$)(.+?)\s*(\$|\Z)', re.DOTALL | re.MULTILINE) + +p = argparse.ArgumentParser( + description="Outputs src/test/testcases.rs to stdout") +p.add_argument("testcases", metavar="FILE", + help="The testcases.docopt language agnostic test suite.") +args = p.parse_args() + +with open(args.testcases) as f: + alltests = f.read() + +alltests = re.sub('^r"""', '', alltests) +alltests = re.sub('^\s*#.*$', '', alltests, flags=re.MULTILINE) + +tests = [] # [{usage, args, expect}] (expect is None ==> user-error) +for m in retests.finditer(alltests): + usage, invokes = m.group(1).strip(), m.group(2).strip() + assert invokes.startswith('$'), 'Bad test: "%s"' % invokes + invokes = re.sub('^\$', '', invokes) + + for mi in reinvokes.finditer(invokes): + invoke, expect = mi.group(1).strip(), mi.group(2).strip() + err = expect.startswith('"user-error"') + tests.append({ + 'usage': usage, + 'args': invoke.split()[1:], + 'expect': None if err else json.loads(expect), + }) + + +def show_test(i, t): + def show_expect(e): + kvs = [] + for k, v in e.iteritems(): + kvs.append('("%s", %s)' % (k, show_value(v))) + return ', '.join(kvs) + def show_value(v): + if v is None: + return 'Plain(None)' + elif isinstance(v, basestring): + return 'Plain(Some("%s".to_string()))' % v + elif isinstance(v, bool): + return 'Switch(%s)' % ('true' if v else 'false') + elif isinstance(v, int): + return 'Counted(%d)' % v + elif isinstance(v, list): + elms = ', 
'.join(['"%s".to_string()' % el for el in v]) + return 'List(vec!(%s))' % elms + else: + raise ValueError('Unrecognized value: "%s" (type: %s)' + % (v, type(v))) + + args = ', '.join(['"%s"' % arg for arg in t['args']]) + if t['expect'] is None: + return 'test_user_error!(test_%d_testcases, "%s", &[%s]);' \ + % (i, t['usage'], args) + else: + expect = show_expect(t['expect']) + return 'test_expect!(test_%d_testcases, "%s", &[%s], vec!(%s));' \ + % (i, t['usage'], args, expect) + +print( +"""// !!! ATTENTION !!! +// This file is automatically generated by `scripts/mk-testcases`. +// Please do not edit this file directly! + +use Value::{{Switch, Counted, Plain, List}}; +use test::{{get_args, map_from_alist, same_args}}; + +{tests} +""".format(tests='\n\n'.join([show_test(i, t) for i, t in enumerate(tests)]))) + diff -Nru cargo-0.17.0/vendor/docopt-0.7.0/session.vim cargo-0.19.0/vendor/docopt-0.7.0/session.vim --- cargo-0.17.0/vendor/docopt-0.7.0/session.vim 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/docopt-0.7.0/session.vim 2017-08-16 09:07:20.000000000 +0000 @@ -0,0 +1,3 @@ +au BufWritePost *.rs silent!make ctags > /dev/null 2>&1 +" let g:syntastic_rust_rustc_fname = "src/lib.rs" +" let g:syntastic_rust_rustc_args = "--no-trans" diff -Nru cargo-0.17.0/vendor/docopt-0.7.0/src/dopt.rs cargo-0.19.0/vendor/docopt-0.7.0/src/dopt.rs --- cargo-0.17.0/vendor/docopt-0.7.0/src/dopt.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/docopt-0.7.0/src/dopt.rs 2017-08-16 09:07:20.000000000 +0000 @@ -0,0 +1,897 @@ +use std::collections::HashMap; +use std::error::Error as StdError; +use std::fmt::{self, Debug}; +use std::io::{self, Write}; +use std::str::FromStr; + +use regex::{Captures, Regex}; +use rustc_serialize::Decodable; + +use parse::Parser; +use synonym::SynonymMap; + +use self::Value::{Switch, Counted, Plain, List}; +use self::Error::{Usage, Argv, NoMatch, Decode, WithProgramUsage, Help, Version}; + +use cap_or_empty; + +/// Represents the 
different types of Docopt errors. +/// +/// This error type has a lot of variants. In the common case, you probably +/// don't care why Docopt has failed, and would rather just quit the program +/// and show an error message instead. The `exit` method defined on the `Error` +/// type will do just that. It will also set the exit code appropriately +/// (no error for `--help` or `--version`, but an error code for bad usage, +/// bad argv, no match or bad decode). +/// +/// ### Example +/// +/// Generally, you want to parse the usage string, try to match the argv +/// and then quit the program if there was an error reported at any point +/// in that process. This can be achieved like so: +/// +/// ```no_run +/// use docopt::Docopt; +/// +/// const USAGE: &'static str = " +/// Usage: ... +/// "; +/// +/// let args = Docopt::new(USAGE) +/// .and_then(|d| d.parse()) +/// .unwrap_or_else(|e| e.exit()); +/// ``` +#[derive(Debug)] +pub enum Error { + /// Parsing the usage string failed. + /// + /// This error can only be triggered by the programmer, i.e., the writer + /// of the Docopt usage string. This error is usually indicative of a bug + /// in your program. + Usage(String), + + /// Parsing the argv specified failed. + /// + /// The payload is a string describing why the arguments provided could not + /// be parsed. + /// + /// This is distinct from `NoMatch` because it will catch errors like + /// using flags that aren't defined in the usage string. + Argv(String), + + /// The given argv parsed successfully, but it did not match any example + /// usage of the program. + /// + /// Regrettably, there is no descriptive message describing *why* the + /// given argv didn't match any of the usage strings. + NoMatch, + + /// This indicates a problem decoding a successful argv match into a + /// decodable value. + Decode(String), + + /// Parsing failed, and the program usage should be printed next to the + /// failure message. Typically this wraps `Argv` and `NoMatch` errors. 
+ WithProgramUsage(Box, String), + + /// Decoding or parsing failed because the command line specified that the + /// help message should be printed. + Help, + + /// Decoding or parsing failed because the command line specified that the + /// version should be printed + /// + /// The version is included as a payload to this variant. + Version(String), +} + +impl Error { + /// Return whether this was a fatal error or not. + /// + /// Non-fatal errors include requests to print the help or version + /// information of a program, while fatal errors include those such as + /// failing to decode or parse. + pub fn fatal(&self) -> bool { + match *self { + Help | Version(..) => false, + Usage(..) | Argv(..) | NoMatch | Decode(..) => true, + WithProgramUsage(ref b, _) => b.fatal(), + } + } + + /// Print this error and immediately exit the program. + /// + /// If the error is non-fatal (e.g., `Help` or `Version`), then the + /// error is printed to stdout and the exit status will be `0`. Otherwise, + /// when the error is fatal, the error is printed to stderr and the + /// exit status will be `1`. + pub fn exit(&self) -> ! { + if self.fatal() { + werr!("{}\n", self); + ::std::process::exit(1) + } else { + let _ = writeln!(&mut io::stdout(), "{}", self); + ::std::process::exit(0) + } + } +} + +impl fmt::Display for Error { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match *self { + WithProgramUsage(ref other, ref usage) => { + let other = other.to_string(); + if other.is_empty() { + write!(f, "{}", usage) + } else { + write!(f, "{}\n\n{}", other, usage) + } + } + Help => write!(f, ""), + NoMatch => write!(f, "Invalid arguments."), + Usage(ref s) | Argv(ref s) | Decode(ref s) | Version(ref s) => { + write!(f, "{}", s) + } + } + } +} + +impl StdError for Error { + fn description(&self) -> &str { + match *self { + Usage(..) => "invalid usage string", + Argv(..) => "failed to parse specified argv", + NoMatch => "could not match specified argv", + Decode(..) 
=> "failed to decode", + WithProgramUsage(..) => "failed to parse specified argv", + Help => "help message requested", + Version(..) => "version message requested", + } + } + + fn cause(&self) -> Option<&StdError> { + match *self { + WithProgramUsage(ref cause, _) => Some(&**cause), + _ => None, + } + } +} + +/// The main Docopt type, which is constructed with a Docopt usage string. +/// +/// This can be used to match command line arguments to produce a `ArgvMap`. +#[derive(Clone, Debug)] +pub struct Docopt { + p: Parser, + argv: Option>, + options_first: bool, + help: bool, + version: Option, +} + +impl Docopt { + /// Parse the Docopt usage string given. + /// + /// The `Docopt` value returned may be used immediately to parse command + /// line arguments with a default configuration. + /// + /// If there was a problem parsing the usage string, a `Usage` error + /// is returned. + pub fn new(usage: S) -> Result + where S: ::std::ops::Deref { + Parser::new(usage.deref()) + .map_err(Usage) + .map(|p| Docopt { + p: p, + argv: None, + options_first: false, + help: true, + version: None, + }) + } + + /// Parse and decode the given argv. + /// + /// This is a convenience method for + /// `parse().and_then(|vals| vals.decode())`. + /// + /// For details on how decoding works, please see the documentation for + /// `ArgvMap`. + pub fn decode(&self) -> Result where D: Decodable { + self.parse().and_then(|vals| vals.decode()) + } + + /// Parse command line arguments and try to match them against a usage + /// pattern specified in the Docopt string. + /// + /// If there is a match, then an `ArgvMap` is returned, which maps + /// flags, commands and arguments to values. + /// + /// If parsing the command line arguments fails, then an `Argv` error is + /// returned. If parsing succeeds but there is no match, then a `NoMatch` + /// error is returned. Both of these errors are always returned inside a + /// `WithProgramUsage` error. 
+ /// + /// If special handling of `help` or `version` is enabled (the former is + /// enabled by default), then `Help` or `Version` errors are returned + /// if `--help` or `--version` is present. + pub fn parse(&self) -> Result { + let argv = self.argv.clone().unwrap_or_else(Docopt::get_argv); + let vals = try!( + self.p.parse_argv(argv, self.options_first) + .map_err(|s| self.err_with_usage(Argv(s))) + .and_then(|argv| + match self.p.matches(&argv) { + Some(m) => Ok(ArgvMap { map: m }), + None => Err(self.err_with_usage(NoMatch)), + })); + if self.help && vals.get_bool("--help") { + return Err(self.err_with_full_doc(Help)); + } + match self.version { + Some(ref v) if vals.get_bool("--version") => { + return Err(Version(v.clone())) + } + _ => {}, + } + Ok(vals) + } + + /// Set the argv to be used for Docopt parsing. + /// + /// By default, when no argv is set, and it is automatically taken from + /// `std::env::args()`. + /// + /// The `argv` given *must* be the full set of `argv` passed to the + /// program. e.g., `["cp", "src", "dest"]` is right while `["src", "dest"]` + /// is wrong. + pub fn argv(mut self, argv: I) -> Docopt + where I: IntoIterator, S: AsRef { + self.argv = Some( + argv.into_iter().skip(1).map(|s| s.as_ref().to_owned()).collect() + ); + self + } + + /// Enables the "options first" Docopt behavior. + /// + /// The options first behavior means that all flags *must* appear before + /// position arguments. That is, after the first position argument is + /// seen, all proceeding arguments are interpreted as positional + /// arguments unconditionally. + pub fn options_first(mut self, yes: bool) -> Docopt { + self.options_first = yes; + self + } + + /// Enables automatic handling of `--help`. + /// + /// When this is enabled and `--help` appears anywhere in the arguments, + /// then a `Help` error will be returned. 
You may then use the `exit` + /// method on the error value to conveniently quit the program (which will + /// print the full usage string to stdout). + /// + /// Note that for this to work, `--help` must be a valid pattern. + /// + /// When disabled, there is no special handling of `--help`. + pub fn help(mut self, yes: bool) -> Docopt { + self.help = yes; + self + } + + /// Enables automatic handling of `--version`. + /// + /// When this is enabled and `--version` appears anywhere in the arguments, + /// then a `Version(s)` error will be returned, where `s` is the string + /// given here. You may then use the `exit` method on the error value to + /// convenient quit the program (which will print the version to stdout). + /// + /// When disabled (a `None` value), there is no special handling of + /// `--version`. + pub fn version(mut self, version: Option) -> Docopt { + self.version = version; + self + } + + #[doc(hidden)] + // Exposed for use in `docopt_macros`. + pub fn parser(&self) -> &Parser { + &self.p + } + + fn err_with_usage(&self, e: Error) -> Error { + WithProgramUsage( + Box::new(e), self.p.usage.trim().into()) + } + + fn err_with_full_doc(&self, e: Error) -> Error { + WithProgramUsage( + Box::new(e), self.p.full_doc.trim().into()) + } + + fn get_argv() -> Vec { + // Hmm, we should probably handle a Unicode decode error here... ---AG + ::std::env::args().skip(1).collect() + } +} + +/// A map containing matched values from command line arguments. +/// +/// The keys are just as specified in Docopt: `--flag` for a long flag or +/// `-f` for a short flag. (If `-f` is a synonym for `--flag`, then either +/// key will work.) `ARG` or `` specify a positional argument and `cmd` +/// specifies a command. +#[derive(Clone)] +pub struct ArgvMap { + #[doc(hidden)] + pub map: SynonymMap, +} + +impl ArgvMap { + /// Tries to decode the map of values into a struct. + /// + /// This method should always be called to decode a `ArgvMap` into + /// a struct. 
All fields of the struct must map to a corresponding key + /// in the `ArgvMap`. To this end, each member must have a special prefix + /// corresponding to the different kinds of patterns in Docopt. There are + /// three prefixes: `flag_`, `arg_` and `cmd_` which respectively + /// correspond to short/long flags, positional arguments and commands. + /// + /// If a Docopt item has a `-` in its name, then it is converted to an `_`. + /// + /// # Example + /// + /// ```rust + /// # extern crate docopt; + /// # extern crate rustc_serialize; + /// # fn main() { + /// use docopt::Docopt; + /// + /// const USAGE: &'static str = " + /// Usage: cargo [options] (build | test) + /// cargo --help + /// + /// Options: -v, --verbose + /// -h, --help + /// "; + /// + /// #[derive(RustcDecodable)] + /// struct Args { + /// cmd_build: bool, + /// cmd_test: bool, + /// flag_verbose: bool, + /// flag_h: bool, + /// } + /// + /// let argv = || vec!["cargo", "build", "-v"].into_iter(); + /// let args: Args = Docopt::new(USAGE) + /// .and_then(|d| d.argv(argv()).decode()) + /// .unwrap_or_else(|e| e.exit()); + /// assert!(args.cmd_build && !args.cmd_test + /// && args.flag_verbose && !args.flag_h); + /// # } + /// ``` + /// + /// Note that in the above example, `flag_h` is used but `flag_help` + /// could also be used. (In fact, both could be used at the same time.) + /// + /// In this example, only the `bool` type was used, but any type satisfying + /// the `Decodable` trait is valid. + pub fn decode(self) -> Result { + Decodable::decode(&mut Decoder { vals: self, stack: vec!() }) + } + + /// Finds the value corresponding to `key` and calls `as_bool()` on it. + /// If the key does not exist, `false` is returned. + pub fn get_bool(&self, key: &str) -> bool { + self.find(key).map_or(false, |v| v.as_bool()) + } + + /// Finds the value corresponding to `key` and calls `as_count()` on it. + /// If the key does not exist, `0` is returned. 
+ pub fn get_count(&self, key: &str) -> u64 { + self.find(key).map_or(0, |v| v.as_count()) + } + + /// Finds the value corresponding to `key` and calls `as_str()` on it. + /// If the key does not exist, `""` is returned. + pub fn get_str(&self, key: &str) -> &str { + self.find(key).map_or("", |v| v.as_str()) + } + + /// Finds the value corresponding to `key` and calls `as_vec()` on it. + /// If the key does not exist, `vec!()` is returned. + pub fn get_vec(&self, key: &str) -> Vec<&str> { + self.find(key).map(|v| v.as_vec()).unwrap_or(vec!()) + } + + /// Return the raw value corresponding to some `key`. + /// + /// `key` should be a string in the traditional Docopt format. e.g., + /// `` or `--flag`. + pub fn find(&self, key: &str) -> Option<&Value> { + self.map.find(&key.into()) + } + + /// Return the number of values, not including synonyms. + pub fn len(&self) -> usize { + self.map.len() + } + + /// Converts a Docopt key to a struct field name. + /// This makes a half-hearted attempt at making the key a valid struct + /// field name (like replacing `-` with `_`), but it does not otherwise + /// guarantee that the result is a valid struct field name. + #[doc(hidden)] + pub fn key_to_struct_field(name: &str) -> String { + lazy_static! 
{ + static ref RE: Regex = regex!( + r"^(?:--?(?P\S+)|(?:(?P\p{Lu}+)|<(?P[^>]+)>)|(?P\S+))$" + ); + } + fn sanitize(name: &str) -> String { + name.replace("-", "_") + } + + RE.replace(name, |cap: &Captures| { + let (flag, cmd) = ( + cap_or_empty(cap, "flag"), + cap_or_empty(cap, "cmd"), + ); + let (argu, argb) = ( + cap_or_empty(cap, "argu"), + cap_or_empty(cap, "argb"), + ); + let (prefix, name) = + if !flag.is_empty() { + ("flag_", flag) + } else if !argu.is_empty() { + ("arg_", argu) + } else if !argb.is_empty() { + ("arg_", argb) + } else if !cmd.is_empty() { + ("cmd_", cmd) + } else { + panic!("Unknown ArgvMap key: '{}'", name) + }; + let mut prefix = prefix.to_owned(); + prefix.push_str(&sanitize(name)); + prefix + }).into_owned() + } + + /// Converts a struct field name to a Docopt key. + #[doc(hidden)] + pub fn struct_field_to_key(field: &str) -> String { + lazy_static! { + static ref FLAG: Regex = regex!(r"^flag_"); + static ref ARG: Regex = regex!(r"^arg_"); + static ref LETTERS: Regex = regex!(r"^\p{Lu}+$"); + static ref CMD: Regex = regex!(r"^cmd_"); + } + fn desanitize(name: &str) -> String { + name.replace("_", "-") + } + let name = + if field.starts_with("flag_") { + let name = FLAG.replace(field, ""); + let mut pre_name = (if name.len() == 1 { "-" } else { "--" }) + .to_owned(); + pre_name.push_str(&*name); + pre_name + } else if field.starts_with("arg_") { + let name = ARG.replace(field, "").into_owned(); + if LETTERS.is_match(&name) { + name + } else { + let mut pre_name = "<".to_owned(); + pre_name.push_str(&*name); + pre_name.push('>'); + pre_name + } + } else if field.starts_with("cmd_") { + CMD.replace(field, "").into_owned() + } else { + panic!("Unrecognized struct field: '{}'", field) + }; + desanitize(&*name) + } +} + +impl fmt::Debug for ArgvMap { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + if self.len() == 0 { + return write!(f, "{{EMPTY}}"); + } + + // This is a little crazy, but we want to group synonyms with + // their 
keys and sort them for predictable output. + let reverse: HashMap<&String, &String> = + self.map.synonyms().map(|(from, to)| (to, from)).collect(); + let mut keys: Vec<&String> = self.map.keys().collect(); + keys.sort(); + let mut first = true; + for &k in &keys { + if !first { try!(write!(f, "\n")); } else { first = false; } + match reverse.get(&k) { + None => { + try!(write!(f, "{} => {:?}", k, self.map.get(k))) + } + Some(s) => { + try!(write!(f, "{}, {} => {:?}", s, k, self.map.get(k))) + } + } + } + Ok(()) + } +} + +/// A matched command line value. +/// +/// The value can be a boolean, counted repetition, a plain string or a list +/// of strings. +/// +/// The various `as_{bool,count,str,vec}` methods provide convenient access +/// to values without destructuring manually. +#[derive(Clone, Debug, PartialEq)] +pub enum Value { + /// A boolean value from a flag that has no argument. + /// + /// The presence of a flag means `true` and the absence of a flag + /// means `false`. + Switch(bool), + + /// The number of occurrences of a repeated flag. + Counted(u64), + + /// A positional or flag argument. + /// + /// This is `None` when the positional argument or flag is not present. + /// Note that it is possible to have `Some("")` for a present but empty + /// argument. + Plain(Option), + + /// A List of positional or flag arguments. + /// + /// This list may be empty when no arguments or flags are present. + List(Vec), +} + +impl Value { + /// Returns the value as a bool. + /// + /// Counted repetitions are `false` if `0` and `true` otherwise. + /// Plain strings are `true` if present and `false` otherwise. + /// Lists are `true` if non-empty and `false` otherwise. + pub fn as_bool(&self) -> bool { + match *self { + Switch(b) => b, + Counted(n) => n > 0, + Plain(None) => false, + Plain(Some(_)) => true, + List(ref vs) => !vs.is_empty(), + } + } + + /// Returns the value as a count of the number of times it occurred. 
+ /// + /// Booleans are `1` if `true` and `0` otherwise. + /// Plain strings are `1` if present and `0` otherwise. + /// Lists correspond to its length. + pub fn as_count(&self) -> u64 { + match *self { + Switch(b) => if b { 1 } else { 0 }, + Counted(n) => n, + Plain(None) => 0, + Plain(Some(_)) => 1, + List(ref vs) => vs.len() as u64, + } + } + + /// Returns the value as a string. + /// + /// All values return an empty string except for a non-empty plain string. + pub fn as_str(&self) -> &str { + match *self { + Switch(_) | Counted(_) | Plain(None) | List(_) => "", + Plain(Some(ref s)) => &**s, + } + } + + /// Returns the value as a list of strings. + /// + /// Booleans, repetitions and empty strings correspond to an empty list. + /// Plain strings correspond to a list of length `1`. + pub fn as_vec(&self) -> Vec<&str> { + match *self { + Switch(_) | Counted(_) | Plain(None) => vec![], + Plain(Some(ref s)) => vec![&**s], + List(ref vs) => vs.iter().map(|s| &**s).collect(), + } + } +} + +/// Decoder for `ArgvMap` into your own `Decodable` types. +/// +/// In general, you shouldn't have to use this type directly. It is exposed +/// in case you want to write a generic function that produces a decodable +/// value. For example, here's a function that takes a usage string, an argv +/// and produces a decodable value: +/// +/// ```rust +/// # extern crate docopt; +/// # extern crate rustc_serialize; +/// # fn main() { +/// use docopt::Docopt; +/// use rustc_serialize::Decodable; +/// +/// fn decode(usage: &str, argv: &[&str]) +/// -> Result { +/// Docopt::new(usage) +/// .and_then(|d| d.argv(argv.iter().cloned()).decode()) +/// } +/// # } +pub struct Decoder { + vals: ArgvMap, + stack: Vec, +} + +#[derive(Debug)] +struct DecoderItem { + key: String, + struct_field: String, + val: Option, +} + +macro_rules! 
derr( + ($($arg:tt)*) => (return Err(Decode(format!($($arg)*)))) +); + +impl Decoder { + fn push(&mut self, struct_field: &str) { + let key = ArgvMap::struct_field_to_key(struct_field); + self.stack.push(DecoderItem { + key: key.clone(), + struct_field: struct_field.into(), + val: self.vals.find(&*key).cloned(), + }); + } + + fn pop(&mut self) -> Result { + match self.stack.pop() { + None => derr!("Could not decode value into unknown key."), + Some(it) => Ok(it) + } + } + + fn pop_key_val(&mut self) -> Result<(String, Value), Error> { + let it = try!(self.pop()); + match it.val { + None => derr!( + "Could not find argument '{}' (from struct field '{}'). +Note that each struct field must have the right key prefix, which must +be one of `cmd_`, `flag_` or `arg_`.", + it.key, it.struct_field), + Some(v) => Ok((it.key, v)) + } + } + + fn pop_val(&mut self) -> Result { + let (_, v) = try!(self.pop_key_val()); + Ok(v) + } + + fn to_number(&mut self, expect: &str) -> Result + where T: FromStr + ToString, ::Err: Debug { + let (k, v) = try!(self.pop_key_val()); + match v { + Counted(n) => Ok(n.to_string().parse().unwrap()), // lol + _ => { + if v.as_str().trim().is_empty() { + Ok("0".parse().unwrap()) // lol + } else { + match v.as_str().parse() { + Err(_) => { + derr!("Could not decode '{}' to {} for '{}'.", + v.as_str(), expect, k) + } + Ok(v) => Ok(v), + } + } + } + } + } + + fn to_float(&mut self, expect: &str) -> Result { + let (k, v) = try!(self.pop_key_val()); + match v { + Counted(n) => Ok(n as f64), + _ => { + match v.as_str().parse() { + Err(_) => derr!("Could not decode '{}' to {} for '{}'.", + v.as_str(), expect, k), + Ok(v) => Ok(v), + } + } + } + } +} + +macro_rules! 
read_num { + ($name:ident, $ty:ty) => ( + fn $name(&mut self) -> Result<$ty, Error> { + self.to_number::<$ty>(stringify!($ty)).map(|n| n as $ty) + } + ); +} + +impl ::rustc_serialize::Decoder for Decoder { + type Error = Error; + + fn error(&mut self, err: &str) -> Error { + Decode(err.into()) + } + + fn read_nil(&mut self) -> Result<(), Error> { + // I don't know what the right thing is here, so just fail for now. + panic!("I don't know how to read into a nil value.") + } + + read_num!(read_usize, usize); + read_num!(read_u64, u64); + read_num!(read_u32, u32); + read_num!(read_u16, u16); + read_num!(read_u8, u8); + read_num!(read_isize, isize); + read_num!(read_i64, i64); + read_num!(read_i32, i32); + read_num!(read_i16, i16); + read_num!(read_i8, i8); + + fn read_bool(&mut self) -> Result { + self.pop_val().map(|v| v.as_bool()) + } + + fn read_f64(&mut self) -> Result { + self.to_float("f64") + } + + fn read_f32(&mut self) -> Result { + self.to_float("f32").map(|n| n as f32) + } + + fn read_char(&mut self) -> Result { + let (k, v) = try!(self.pop_key_val()); + let vstr = v.as_str(); + match vstr.chars().count() { + 1 => Ok(vstr.chars().next().unwrap()), + _ => derr!("Could not decode '{}' into char for '{}'.", vstr, k), + } + } + + fn read_str(&mut self) -> Result { + self.pop_val().map(|v| v.as_str().into()) + } + + fn read_enum(&mut self, _: &str, f: F) -> Result + where F: FnOnce(&mut Decoder) -> Result { + f(self) + } + + fn read_enum_variant(&mut self, names: &[&str], mut f: F) + -> Result + where F: FnMut(&mut Decoder, usize) -> Result { + let v = to_lowercase(try!(self.pop_val()).as_str()); + let i = + match names.iter().map(|&n| to_lowercase(n)).position(|n| n == v) { + Some(i) => i, + None => { + derr!("Could not match '{}' with any of \ + the allowed variants: {:?}", v, names) + } + }; + f(self, i) + } + + fn read_enum_variant_arg(&mut self, _: usize, _: F) + -> Result + where F: FnOnce(&mut Decoder) -> Result { + unimplemented!() + } + + fn 
read_enum_struct_variant(&mut self, _: &[&str], _: F) + -> Result + where F: FnMut(&mut Decoder, usize) -> Result { + unimplemented!() + } + + fn read_enum_struct_variant_field(&mut self, _: &str, _: usize, _: F) + -> Result + where F: FnOnce(&mut Decoder) -> Result { + unimplemented!() + } + + fn read_struct(&mut self, _: &str, _: usize, f: F) -> Result + where F: FnOnce(&mut Decoder) -> Result { + f(self) + } + + fn read_struct_field(&mut self, f_name: &str, _: usize, f: F) + -> Result + where F: FnOnce(&mut Decoder) -> Result { + self.push(f_name); + f(self) + } + + fn read_tuple(&mut self, _: usize, _: F) -> Result + where F: FnOnce(&mut Decoder) -> Result { + unimplemented!() + } + + fn read_tuple_arg(&mut self, _: usize, _: F) -> Result + where F: FnOnce(&mut Decoder) -> Result { + unimplemented!() + } + + fn read_tuple_struct(&mut self, _: &str, _: usize, _: F) + -> Result + where F: FnOnce(&mut Decoder) -> Result { + unimplemented!() + } + + fn read_tuple_struct_arg(&mut self, _: usize, _: F) + -> Result + where F: FnOnce(&mut Decoder) -> Result { + unimplemented!() + } + + fn read_option(&mut self, mut f: F) -> Result + where F: FnMut(&mut Decoder, bool) -> Result { + let option = + match self.stack.last() { + None => derr!("Could not decode value into unknown key."), + Some(it) => it.val.as_ref() + .map_or(false, |v| v.as_bool()) + }; + f(self, option) + } + + fn read_seq(&mut self, f: F) -> Result + where F: FnOnce(&mut Decoder, usize) -> Result { + let it = try!(self.pop()); + let list = it.val.unwrap_or(List(vec!())); + let vals = list.as_vec(); + for val in vals.iter().rev() { + self.stack.push(DecoderItem { + key: it.key.clone(), + struct_field: it.struct_field.clone(), + val: Some(Plain(Some((*val).into()))), + }) + } + f(self, vals.len()) + } + + fn read_seq_elt(&mut self, _: usize, f: F) -> Result + where F: FnOnce(&mut Decoder) -> Result { + f(self) + } + + fn read_map(&mut self, _: F) -> Result + where F: FnOnce(&mut Decoder, usize) -> Result { 
+ unimplemented!() + } + + fn read_map_elt_key(&mut self, _: usize, _: F) -> Result + where F: FnOnce(&mut Decoder) -> Result { + unimplemented!() + } + + fn read_map_elt_val(&mut self, _: usize, _: F) -> Result + where F: FnOnce(&mut Decoder) -> Result { + unimplemented!() + } +} + +fn to_lowercase>(s: S) -> String { + s.into().chars().map(|c| c.to_lowercase().next().unwrap()).collect() +} diff -Nru cargo-0.17.0/vendor/docopt-0.7.0/src/lib.rs cargo-0.19.0/vendor/docopt-0.7.0/src/lib.rs --- cargo-0.17.0/vendor/docopt-0.7.0/src/lib.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/docopt-0.7.0/src/lib.rs 2017-08-16 09:07:20.000000000 +0000 @@ -0,0 +1,246 @@ +//! Docopt for Rust. This implementation conforms to the +//! [official description of Docopt](http://docopt.org/) and +//! [passes its test suite](https://github.com/docopt/docopt/pull/201). +//! +//! This library is [on GitHub](https://github.com/docopt/docopt.rs). +//! +//! Fundamentally, Docopt is a command line argument parser. The detail that +//! distinguishes it from most parsers is that the parser is derived from the +//! usage string. Here's a simple example: +//! +//! ```rust +//! use docopt::Docopt; +//! +//! // Write the Docopt usage string. +//! const USAGE: &'static str = " +//! Usage: cp [-a] +//! cp [-a] ... +//! +//! Options: +//! -a, --archive Copy everything. +//! "; +//! +//! // The argv. Normally you'd just use `parse` which will automatically +//! // use `std::env::args()`. +//! let argv = || vec!["cp", "-a", "file1", "file2", "dest/"]; +//! +//! // Parse argv and exit the program with an error message if it fails. +//! let args = Docopt::new(USAGE) +//! .and_then(|d| d.argv(argv().into_iter()).parse()) +//! .unwrap_or_else(|e| e.exit()); +//! +//! // Now access your argv values. Synonyms work just fine! +//! assert!(args.get_bool("-a") && args.get_bool("--archive")); +//! assert_eq!(args.get_vec(""), vec!["file1", "file2"]); +//! assert_eq!(args.get_str(""), "dest/"); +//! 
assert_eq!(args.get_str(""), ""); +//! ``` +//! +//! # Type based decoding +//! +//! Often, command line values aren't just strings or booleans---sometimes +//! they are integers, or enums, or something more elaborate. Using the +//! standard Docopt interface can be inconvenient for this purpose, because +//! you'll need to convert all of the values explicitly. Instead, this crate +//! provides a `Decoder` that converts an `ArgvMap` to your custom struct. +//! Here is the same example as above using type based decoding: +//! +//! ```rust +//! # extern crate docopt; +//! # extern crate rustc_serialize; +//! # fn main() { +//! use docopt::Docopt; +//! +//! // Write the Docopt usage string. +//! const USAGE: &'static str = " +//! Usage: cp [-a] +//! cp [-a] ... +//! +//! Options: +//! -a, --archive Copy everything. +//! "; +//! +//! #[derive(RustcDecodable)] +//! struct Args { +//! arg_source: Vec, +//! arg_dest: String, +//! arg_dir: String, +//! flag_archive: bool, +//! } +//! +//! let argv = || vec!["cp", "-a", "file1", "file2", "dest/"]; +//! let args: Args = Docopt::new(USAGE) +//! .and_then(|d| d.argv(argv().into_iter()).decode()) +//! .unwrap_or_else(|e| e.exit()); +//! +//! // Now access your argv values. +//! fn s(x: &str) -> String { x.to_string() } +//! assert!(args.flag_archive); +//! assert_eq!(args.arg_source, vec![s("file1"), s("file2")]); +//! assert_eq!(args.arg_dir, s("dest/")); +//! assert_eq!(args.arg_dest, s("")); +//! # } +//! ``` +//! +//! # Command line arguments for `rustc` +//! +//! Here's an example with a subset of `rustc`'s command line arguments that +//! shows more of Docopt and some of the benefits of type based decoding. +//! +//! ```rust +//! # extern crate docopt; +//! # extern crate rustc_serialize; +//! # fn main() { +//! # #![allow(non_snake_case)] +//! use docopt::Docopt; +//! +//! // Write the Docopt usage string. +//! const USAGE: &'static str = " +//! Usage: rustc [options] [--cfg SPEC... -L PATH...] INPUT +//! 
rustc (--help | --version) +//! +//! Options: +//! -h, --help Show this message. +//! --version Show the version of rustc. +//! --cfg SPEC Configure the compilation environment. +//! -L PATH Add a directory to the library search path. +//! --emit TYPE Configure the output that rustc will produce. +//! Valid values: asm, ir, bc, obj, link. +//! --opt-level LEVEL Optimize with possible levels 0-3. +//! "; +//! +//! #[derive(RustcDecodable)] +//! struct Args { +//! arg_INPUT: String, +//! flag_emit: Option, +//! flag_opt_level: Option, +//! flag_cfg: Vec, +//! flag_L: Vec, +//! flag_help: bool, +//! flag_version: bool, +//! } +//! +//! // This is easy. The decoder will automatically restrict values to +//! // strings that match one of the enum variants. +//! #[derive(RustcDecodable)] +//! # #[derive(Debug, PartialEq)] +//! enum Emit { Asm, Ir, Bc, Obj, Link } +//! +//! // This one is harder because we want the user to specify an integer, +//! // but restrict it to a specific range. So we implement `Decodable` +//! // ourselves. +//! # #[derive(Debug, PartialEq)] +//! enum OptLevel { Zero, One, Two, Three } +//! +//! impl rustc_serialize::Decodable for OptLevel { +//! fn decode(d: &mut D) +//! -> Result { +//! Ok(match try!(d.read_usize()) { +//! 0 => OptLevel::Zero, 1 => OptLevel::One, +//! 2 => OptLevel::Two, 3 => OptLevel::Three, +//! n => { +//! let err = format!( +//! "Could not decode '{}' as opt-level.", n); +//! return Err(d.error(&*err)); +//! } +//! }) +//! } +//! } +//! +//! let argv = || vec!["rustc", "-L", ".", "-L", "..", "--cfg", "a", +//! "--opt-level", "2", "--emit=ir", "docopt.rs"]; +//! let args: Args = Docopt::new(USAGE) +//! .and_then(|d| d.argv(argv().into_iter()).decode()) +//! .unwrap_or_else(|e| e.exit()); +//! +//! // Now access your argv values. +//! fn s(x: &str) -> String { x.to_string() } +//! assert_eq!(args.arg_INPUT, "docopt.rs".to_string()); +//! assert_eq!(args.flag_L, vec![s("."), s("..")]); +//! 
assert_eq!(args.flag_cfg, vec![s("a")]); +//! assert_eq!(args.flag_opt_level, Some(OptLevel::Two)); +//! assert_eq!(args.flag_emit, Some(Emit::Ir)); +//! # } +//! ``` +//! +//! # The `docopt!` macro +//! +//! This package comes bundled with an additional crate, `docopt_macros`, +//! which provides a `docopt!` syntax extension. Its purpose is to automate +//! the creation of a Rust struct from a Docopt usage string. In particular, +//! this provides a single point of truth about the definition of command line +//! arguments in your program. +//! +//! Another advantage of using the macro is that errors in your Docopt usage +//! string will be caught at compile time. Stated differently, your program +//! will not compile with an invalid Docopt usage string. +//! +//! The example above using type based decoding can be simplified to this: +//! +//! ```ignore +//! #![feature(plugin)] +//! #![plugin(docopt_macros)] +//! +//! extern crate rustc_serialize; +//! +//! extern crate docopt; +//! +//! // Write the Docopt usage string with the `docopt!` macro. +//! docopt!(Args, " +//! Usage: cp [-a] +//! cp [-a] ... +//! +//! Options: +//! -a, --archive Copy everything. +//! ") +//! +//! fn main() { +//! let argv = || vec!["cp", "-a", "file1", "file2", "dest/"]; +//! +//! // Your `Args` struct has a single static method defined on it, +//! // `docopt`, which will return a normal `Docopt` value. +//! let args: Args = Args::docopt().decode().unwrap_or_else(|e| e.exit()); +//! +//! // Now access your argv values. +//! fn s(x: &str) -> String { x.to_string() } +//! assert!(args.flag_archive); +//! assert_eq!(args.arg_source, vec![s("file1"), s("file2")]); +//! assert_eq!(args.arg_dir, s("dest/")); +//! assert_eq!(args.arg_dest, s("")); +//! } +//! 
``` + +#![crate_name = "docopt"] +#![doc(html_root_url = "http://burntsushi.net/rustdoc/docopt")] + +#![deny(missing_docs)] + +#[macro_use] +extern crate lazy_static; +extern crate regex; +extern crate rustc_serialize; +extern crate strsim; + +pub use dopt::{ArgvMap, Decoder, Docopt, Error, Value}; + +macro_rules! werr( + ($($arg:tt)*) => ({ + use std::io::{Write, stderr}; + write!(&mut stderr(), $($arg)*).unwrap(); + }) +); + +macro_rules! regex( + ($s:expr) => (::regex::Regex::new($s).unwrap()); +); + +fn cap_or_empty<'t>(caps: ®ex::Captures<'t>, name: &str) -> &'t str { + caps.name(name).map_or("", |m| m.as_str()) +} + +mod dopt; +#[doc(hidden)] +pub mod parse; +mod synonym; +#[cfg(test)] +mod test; diff -Nru cargo-0.17.0/vendor/docopt-0.7.0/src/parse.rs cargo-0.19.0/vendor/docopt-0.7.0/src/parse.rs --- cargo-0.17.0/vendor/docopt-0.7.0/src/parse.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/docopt-0.7.0/src/parse.rs 2017-08-16 09:07:20.000000000 +0000 @@ -0,0 +1,1490 @@ +// I am overall pretty displeased with the quality of code in this module. +// I wrote it while simultaneously trying to build a mental model of Docopt's +// specification (hint: one does not exist in written form). As a result, there +// is a lot of coupling and some duplication. +// +// Some things that I think are good about the following code: +// +// - The representation of a "usage pattern." I think it is a minimal +// representation of a pattern's syntax. (One possible tweak: +// `Optional>` -> `Optional>`.) +// - Some disciplined use of regexes. I use a pretty basic state machine +// for parsing patterns, but for teasing out the patterns and options +// from the Docopt string and for picking out flags with arguments, I +// think regexes aren't too bad. There may be one or two scary ones though. +// - The core matching algorithm is reasonably simple and concise, but I +// think writing down some contracts will help me figure out how to make +// the code clearer. 
+// +// Some bad things: +// +// - I tried several times to split some of the pieces in this module into +// separate modules. I could find no clear separation. This suggests that +// there is too much coupling between parsing components. I'm not convinced +// that the coupling is necessary. +// - The parsers for patterns and argv share some structure. There may be +// an easy abstraction waiting there. +// - It is not efficient in the slightest. I tried to be conservative with +// copying strings, but I think I failed. (It may not be worthwhile to fix +// this if it makes the code more awkward. Docopt does not need to be +// efficient.) +// +// Some things to do immediately: +// +// - Document representation and invariants. +// - Less important: write contracts for functions. +// +// Long term: +// +// - Write a specification for Docopt. + +pub use self::Argument::{Zero, One}; +pub use self::Atom::{Short, Long, Command, Positional}; +use self::Pattern::{Alternates, Sequence, Optional, Repeat, PatAtom}; + +use std::borrow::ToOwned; +use std::collections::{HashMap, HashSet}; +use std::collections::hash_map::Entry::{Vacant, Occupied}; +use std::cmp::Ordering; +use std::fmt; +use regex; +use regex::Regex; +use strsim::levenshtein; + +use dopt::Value::{self, Switch, Counted, Plain, List}; +use synonym::SynonymMap; +use cap_or_empty; + +macro_rules! err( + ($($arg:tt)*) => (return Err(format!($($arg)*))) +); + +#[derive(Clone)] +pub struct Parser { + pub program: String, + pub full_doc: String, + pub usage: String, + pub descs: SynonymMap, + usages: Vec, + last_atom_added: Option, // context for [default: ...] 
+} + +impl Parser { + pub fn new(doc: &str) -> Result { + let mut d = Parser { + program: String::new(), + full_doc: doc.into(), + usage: String::new(), + usages: vec!(), + descs: SynonymMap::new(), + last_atom_added: None, + }; + try!(d.parse(doc)); + Ok(d) + } + + pub fn matches(&self, argv: &Argv) -> Option> { + for usage in &self.usages { + match Matcher::matches(argv, usage) { + None => continue, + Some(vals) => return Some(vals), + } + } + None + } + + pub fn parse_argv(&self, argv: Vec, options_first: bool) + -> Result { + Argv::new(self, argv, options_first) + } +} + +impl Parser { + fn options_atoms(&self) -> Vec { + let mut atoms = vec!(); + for (atom, _) in self.descs.iter().filter(|&(_, opts)| opts.is_desc) { + atoms.push(atom.clone()); + } + atoms + } + + fn has_arg(&self, atom: &Atom) -> bool { + match self.descs.find(atom) { + None => false, + Some(opts) => opts.arg.has_arg(), + } + } + + fn has_repeat(&self, atom: &Atom) -> bool { + match self.descs.find(atom) { + None => false, + Some(opts) => opts.repeats, + } + } + + fn parse(&mut self, doc: &str) -> Result<(), String> { + lazy_static! { + static ref MUSAGE: Regex = Regex::new( + r"(?s)(?i:usage):\s*(?P\S+)(?P.*?)(?:$|\n\s*\n)" + ).unwrap(); + } + let caps = match MUSAGE.captures(doc) { + None => err!("Could not find usage patterns in doc string."), + Some(caps) => caps, + }; + if cap_or_empty(&caps, "prog").is_empty() { + err!("Could not find program name in doc string.") + } + self.program = cap_or_empty(&caps, "prog").to_string(); + self.usage = caps[0].to_string(); + + // Before we parse the usage patterns, we look for option descriptions. + // We do this because the information in option descriptions can be + // used to resolve ambiguities in usage patterns (i.e., whether + // `--flag ARG` is a flag with an argument or not). + // + // From the docopt page, "every" line starting with a `-` or a `--` + // is considered an option description. 
Instead, we restrict the lines + // to any line *not* in the usage pattern section. + // + // *sigh* Apparently the above is not true. The official test suite + // includes `Options: -a ...`, which means some lines not beginning + // with `-` can actually have options. + let (pstart, pend) = caps.get(0).map(|m|(m.start(), m.end())).unwrap(); + let (before, after) = (&doc[..pstart], &doc[pend..]); + // We process every line here (instead of restricting to lines starting + // with "-") because we need to check every line for a default value. + // The default value always belongs to the most recently defined desc. + for line in before.lines().chain(after.lines()) { + try!(self.parse_desc(line)); + } + + let mprog = format!( + "^(?:{})?\\s*(.*?)\\s*$", + regex::escape(cap_or_empty(&caps, "prog"))); + let pats = Regex::new(&*mprog).unwrap(); + + if cap_or_empty(&caps, "pats").is_empty() { + let pattern = try!(PatParser::new(self, "").parse()); + self.usages.push(pattern); + } else { + for line in cap_or_empty(&caps, "pats").lines() { + for pat in pats.captures_iter(line.trim()) { + let pattern = try!(PatParser::new(self, &pat[1]).parse()); + self.usages.push(pattern); + } + } + } + Ok(()) + } + + fn parse_desc(&mut self, full_desc: &str) -> Result<(), String> { + lazy_static! { + static ref OPTIONS: Regex = regex!(r"^\s*(?i:options:)\s*"); + static ref ISFLAG: Regex = regex!(r"^(-\S|--\S)"); + static ref REMOVE_DESC: Regex = regex!(r" .*$"); + static ref NORMALIZE_FLAGS: Regex = regex!(r"([^-\s]), -"); + static ref FIND_FLAGS: Regex = regex!(r"(?x) + (?:(?P--[^\x20\t=]+)|(?P-[^\x20\t=]+)) + (?:(?:\x20|=)(?P[^.-]\S*))? + (?P\x20\.\.\.)? + "); + } + let desc = OPTIONS.replace(full_desc.trim(), ""); + let desc = &*desc; + if !ISFLAG.is_match(desc) { + try!(self.parse_default(full_desc)); + return Ok(()) + } + + // Get rid of the description, which must be at least two spaces + // after the flag or argument. 
+ let desc = REMOVE_DESC.replace(desc, ""); + // Normalize `-x, --xyz` to `-x --xyz`. + let desc = NORMALIZE_FLAGS.replace(&desc, "$1 -"); + let desc = desc.trim(); + + let (mut short, mut long) = <(String, String)>::default(); + let mut has_arg = false; + let mut last_end = 0; + let mut repeated = false; + for flags in FIND_FLAGS.captures_iter(desc) { + last_end = flags.get(0).unwrap().end(); + if !cap_or_empty(&flags, "repeated").is_empty() { + // If the "repeated" subcapture is not empty, then we have + // a valid repeated option. + repeated = true; + } + let (s, l) = ( + cap_or_empty(&flags, "short"), + cap_or_empty(&flags, "long"), + ); + if !s.is_empty() { + if !short.is_empty() { + err!("Only one short flag is allowed in an option \ + description, but found '{}' and '{}'.", short, s) + } + short = s.into() + } + if !l.is_empty() { + if !long.is_empty() { + err!("Only one long flag is allowed in an option \ + description, but found '{}' and '{}'.", long, l) + } + long = l.into() + } + if let Some(arg) = flags.name("arg").map(|m| m.as_str()) { + if !arg.is_empty() { + if !Atom::is_arg(arg) { + err!("Argument '{}' is not of the form ARG or .", + arg) + } + has_arg = true; // may be changed to default later + } + } + } + // Make sure that we consumed everything. If there are leftovers, + // then there is some malformed description. Alert the user. + assert!(last_end <= desc.len()); + if last_end < desc.len() { + err!("Extraneous text '{}' in option description '{}'.", + &desc[last_end..], desc) + } + try!(self.add_desc(&short, &long, has_arg, repeated)); + // Looking for default in this line must come after adding the + // description, otherwise `parse_default` won't know which option + // to assign it to. + self.parse_default(full_desc) + } + + fn parse_default(&mut self, desc: &str) -> Result<(), String> { + lazy_static! 
{ + static ref FIND_DEFAULT: Regex = regex!( + r"\[(?i:default):(?P.*)\]" + ); + } + let defval = + match FIND_DEFAULT.captures(desc) { + None => return Ok(()), + Some(c) => cap_or_empty(&c, "val").trim(), + }; + let last_atom = + match self.last_atom_added { + None => err!("Found default value '{}' in '{}' before first \ + option description.", defval, desc), + Some(ref atom) => atom, + }; + let opts = + self.descs + .find_mut(last_atom) + .expect(&*format!("BUG: last opt desc key ('{:?}') is invalid.", + last_atom)); + match opts.arg { + One(None) => {}, // OK + Zero => + err!("Cannot assign default value '{}' to flag '{}' \ + that has no arguments.", defval, last_atom), + One(Some(ref curval)) => + err!("Flag '{}' already has a default value \ + of '{}' (second default value: '{}').", + last_atom, curval, defval), + } + opts.arg = One(Some(defval.into())); + Ok(()) + } + + fn add_desc( + &mut self, + short: &str, + long: &str, + has_arg: bool, + repeated: bool, + ) -> Result<(), String> { + assert!(!short.is_empty() || !long.is_empty()); + if !short.is_empty() && short.chars().count() != 2 { + // It looks like the reference implementation just ignores + // these lines. 
+ return Ok(()); + } + let mut opts = Options::new( + repeated, if has_arg { One(None) } else { Zero }); + opts.is_desc = true; + + if !short.is_empty() && !long.is_empty() { + let (short, long) = (Atom::new(short), Atom::new(long)); + self.descs.insert(long.clone(), opts); + self.descs.insert_synonym(short, long.clone()); + self.last_atom_added = Some(long); + } else if !short.is_empty() { + let short = Atom::new(short); + self.descs.insert(short.clone(), opts); + self.last_atom_added = Some(short); + } else if !long.is_empty() { + let long = Atom::new(long); + self.descs.insert(long.clone(), opts); + self.last_atom_added = Some(long); + } + Ok(()) + } +} + +impl fmt::Debug for Parser { + fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { + fn sorted(mut xs: Vec) -> Vec { + xs.sort(); xs + } + + try!(writeln!(f, "=====")); + try!(writeln!(f, "Program: {}", self.program)); + + try!(writeln!(f, "Option descriptions:")); + let keys = sorted(self.descs.keys().collect()); + for &k in &keys { + try!(writeln!(f, " '{}' => {:?}", k, self.descs.get(k))); + } + + try!(writeln!(f, "Synonyms:")); + let keys: Vec<(&Atom, &Atom)> = + sorted(self.descs.synonyms().collect()); + for &(from, to) in &keys { + try!(writeln!(f, " {:?} => {:?}", from, to)); + } + + try!(writeln!(f, "Usages:")); + for pat in &self.usages { + try!(writeln!(f, " {:?}", pat)); + } + writeln!(f, "=====") + } +} + +struct PatParser<'a> { + dopt: &'a mut Parser, + tokens: Vec, // used while parsing a single usage pattern + curi: usize, // ^^ index into pattern chars + expecting: Vec, // stack of expected ']' or ')' +} + +impl<'a> PatParser<'a> { + fn new(dopt: &'a mut Parser, pat: &str) -> PatParser<'a> { + PatParser { + dopt: dopt, + tokens: pattern_tokens(pat), + curi: 0, + expecting: vec!(), + } + } + + fn parse(&mut self) -> Result { + // let mut seen = HashSet::new(); + let mut p = try!(self.pattern()); + match self.expecting.pop() { + None => {}, + Some(c) => err!("Unclosed group. 
Expected '{}'.", c), + } + p.add_options_shortcut(self.dopt); + p.tag_repeats(&mut self.dopt.descs); + Ok(p) + } + + fn pattern(&mut self) -> Result { + let mut alts = vec!(); + let mut seq = vec!(); + while !self.is_eof() { + match self.cur() { + "..." => { + err!("'...' must appear directly after a group, argument, \ + flag or command.") + } + "-" | "--" => { + // As per specification, `-` and `--` by themselves are + // just commands that should be interpreted conventionally. + seq.push(try!(self.command())); + } + "|" => { + if seq.is_empty() { + err!("Unexpected '|'. Not in form 'a | b | c'.") + } + try!(self.next_noeof("pattern")); + alts.push(Sequence(seq)); + seq = vec!(); + } + "]" | ")" => { + if seq.is_empty() { + err!("Unexpected '{}'. Empty groups are not allowed.", + self.cur()) + } + match self.expecting.pop() { + None => err!("Unexpected '{}'. No open bracket found.", + self.cur()), + Some(c) => { + if c != self.cur().chars().next().unwrap() { + err!("Expected '{}' but got '{}'.", + c, self.cur()) + } + } + } + let mk: fn(Vec) -> Pattern = + if self.cur() == "]" { Optional } else { Sequence }; + self.next(); + return + if alts.is_empty() { + Ok(mk(seq)) + } else { + alts.push(Sequence(seq)); + Ok(mk(vec!(Alternates(alts)))) + } + } + "[" => { + // Check for special '[options]' shortcut. + if self.atis(1, "options") && self.atis(2, "]") { + self.next(); // cur == options + self.next(); // cur == ] + self.next(); + seq.push(self.maybe_repeat(Optional(vec!()))); + continue + } + self.expecting.push(']'); + seq.push(try!(self.group())); + } + "(" => { + self.expecting.push(')'); + seq.push(try!(self.group())); + } + _ => { + if Atom::is_short(self.cur()) { + seq.extend(try!(self.flag_short()).into_iter()); + } else if Atom::is_long(self.cur()) { + seq.push(try!(self.flag_long())); + } else if Atom::is_arg(self.cur()) { + // These are always positional. + // Arguments for -s and --short are picked up + // when parsing flags. 
+ seq.push(try!(self.positional())); + } else if Atom::is_cmd(self.cur()) { + seq.push(try!(self.command())); + } else { + err!("Unknown token type '{}'.", self.cur()) + } + } + } + } + if alts.is_empty() { + Ok(Sequence(seq)) + } else { + alts.push(Sequence(seq)); + Ok(Alternates(alts)) + } + } + + fn flag_short(&mut self) -> Result, String> { + let mut seq = vec!(); + let stacked: String = self.cur()[1..].into(); + for (i, c) in stacked.chars().enumerate() { + let atom = self.dopt.descs.resolve(&Short(c)); + let mut pat = PatAtom(atom.clone()); + if self.dopt.has_repeat(&atom) { + pat = Pattern::repeat(pat); + } + seq.push(pat); + + // The only way for a short option to have an argument is if + // it's specified in an option description. + if !self.dopt.has_arg(&atom) { + self.add_atom_ifnotexists(Zero, &atom); + } else { + // At this point, the flag MUST have an argument. Therefore, + // we interpret the "rest" of the characters as the argument. + // If the "rest" is empty, then we peek to find and make sure + // there is an argument. + let rest = &stacked[i+1..]; + if rest.is_empty() { + try!(self.next_flag_arg(&atom)); + } else { + try!(self.errif_invalid_flag_arg(&atom, rest)); + } + // We either error'd or consumed the rest of the short stack as + // an argument. + break + } + } + self.next(); + // This is a little weird. We've got to manually look for a repeat + // operator right after the stack, and then apply it to each short + // flag we generated. + // If "sequences" never altered semantics, then we could just use that + // here to group a short stack. + if self.atis(0, "...") { + self.next(); + seq = seq.into_iter().map(Pattern::repeat).collect(); + } + Ok(seq) + } + + fn flag_long(&mut self) -> Result { + let (atom, arg) = try!(parse_long_equal(self.cur())); + let atom = self.dopt.descs.resolve(&atom); + if self.dopt.descs.contains_key(&atom) { + // Options already exist for this atom, so we must check to make + // sure things are consistent. 
+ let has_arg = self.dopt.has_arg(&atom); + if arg.has_arg() && !has_arg { + // Found `=` in usage, but previous usage of this flag + // didn't specify an argument. + err!("Flag '{}' does not take any arguments.", atom) + } else if !arg.has_arg() && has_arg { + // Didn't find any `=` in usage for this flag, but previous + // usage of this flag specifies an argument. + // So look for `--flag ARG` + try!(self.next_flag_arg(&atom)); + // We don't care about the value of `arg` since options + // already exist. (In which case, the argument value can never + // change.) + } + } + self.add_atom_ifnotexists(arg, &atom); + self.next(); + let pat = if self.dopt.has_repeat(&atom) { + Pattern::repeat(PatAtom(atom)) + } else { + PatAtom(atom) + }; + Ok(self.maybe_repeat(pat)) + } + + fn next_flag_arg(&mut self, atom: &Atom) -> Result<(), String> { + try!(self.next_noeof(&*format!("argument for flag '{}'", atom))); + self.errif_invalid_flag_arg(atom, self.cur()) + } + + fn errif_invalid_flag_arg(&self, atom: &Atom, arg: &str) + -> Result<(), String> { + if !Atom::is_arg(arg) { + err!("Expected argument for flag '{}', but found \ + malformed argument '{}'.", atom, arg) + } + Ok(()) + } + + fn command(&mut self) -> Result { + let atom = Atom::new(self.cur()); + self.add_atom_ifnotexists(Zero, &atom); + self.next(); + Ok(self.maybe_repeat(PatAtom(atom))) + } + + fn positional(&mut self) -> Result { + let atom = Atom::new(self.cur()); + self.add_atom_ifnotexists(Zero, &atom); + self.next(); + Ok(self.maybe_repeat(PatAtom(atom))) + } + + fn add_atom_ifnotexists(&mut self, arg: Argument, atom: &Atom) { + if !self.dopt.descs.contains_key(atom) { + let opts = Options::new(false, arg); + self.dopt.descs.insert(atom.clone(), opts); + } + } + + fn group(&mut self) + -> Result { + try!(self.next_noeof("pattern")); + let pat = try!(self.pattern()); + Ok(self.maybe_repeat(pat)) + } + + fn maybe_repeat(&mut self, pat: Pattern) -> Pattern { + if self.atis(0, "...") { + self.next(); + 
Pattern::repeat(pat) + } else { + pat + } + } + + fn is_eof(&self) -> bool { + self.curi == self.tokens.len() + } + fn next(&mut self) { + if self.curi == self.tokens.len() { + return + } + self.curi += 1; + } + fn next_noeof(&mut self, expected: &str) -> Result<(), String> { + self.next(); + if self.curi == self.tokens.len() { + err!("Expected {} but reached end of usage pattern.", expected) + } + Ok(()) + } + fn cur(&self) -> &str { + &*self.tokens[self.curi] + } + fn atis(&self, offset: usize, is: &str) -> bool { + let i = self.curi + offset; + i < self.tokens.len() && self.tokens[i] == is + } +} + +#[derive(Clone, Debug)] +enum Pattern { + Alternates(Vec), + Sequence(Vec), + Optional(Vec), + Repeat(Box), + PatAtom(Atom), +} + +#[derive(PartialEq, Eq, Ord, Hash, Clone, Debug)] +pub enum Atom { + Short(char), + Long(String), + Command(String), + Positional(String), +} + +#[derive(Clone, Debug)] +pub struct Options { + /// Set to true if this atom is ever repeated in any context. + /// For positional arguments, non-argument flags and commands, repetition + /// means that they become countable. + /// For flags with arguments, repetition means multiple distinct values + /// can be specified (and are represented as a Vec). + pub repeats: bool, + + /// This specifies whether this atom has any arguments. + /// For commands and positional arguments, this is always Zero. + /// Flags can have zero or one argument, with an optionally default value. + pub arg: Argument, + + /// Whether it shows up in the "options description" second. 
+ pub is_desc: bool, +} + +#[derive(Clone, Debug, PartialEq)] +pub enum Argument { + Zero, + One(Option), // optional default value +} + +impl Pattern { + fn add_options_shortcut(&mut self, par: &Parser) { + fn add(pat: &mut Pattern, all_atoms: &HashSet, par: &Parser) { + match *pat { + Alternates(ref mut ps) | Sequence(ref mut ps) => { + for p in ps.iter_mut() { add(p, all_atoms, par) } + } + Repeat(ref mut p) => add(&mut **p, all_atoms, par), + PatAtom(_) => {} + Optional(ref mut ps) => { + if !ps.is_empty() { + for p in ps.iter_mut() { add(p, all_atoms, par) } + } else { + for atom in par.options_atoms().into_iter() { + if !all_atoms.contains(&atom) { + if par.has_repeat(&atom) { + ps.push(Pattern::repeat(PatAtom(atom))); + } else { + ps.push(PatAtom(atom)); + } + } + } + } + } + } + } + let all_atoms = self.all_atoms(); + add(self, &all_atoms, par); + } + + fn all_atoms(&self) -> HashSet { + fn all_atoms(pat: &Pattern, set: &mut HashSet) { + match *pat { + Alternates(ref ps) | Sequence(ref ps) | Optional(ref ps) => { + for p in ps.iter() { all_atoms(p, set) } + } + Repeat(ref p) => all_atoms(&**p, set), + PatAtom(ref a) => { set.insert(a.clone()); } + } + } + let mut set = HashSet::new(); + all_atoms(self, &mut set); + set + } + + fn tag_repeats(&self, map: &mut SynonymMap) { + fn dotag(p: &Pattern, + rep: bool, + map: &mut SynonymMap, + seen: &mut HashSet) { + match *p { + Alternates(ref ps) => { + // This is a bit tricky. Basically, we don't want the + // existence of an item in mutually exclusive alternations + // to affect whether it repeats or not. + // However, we still need to record seeing each item in + // each alternation. 
+ let fresh = seen.clone(); + for p in ps.iter() { + let mut isolated = fresh.clone(); + dotag(p, rep, map, &mut isolated); + for a in isolated.into_iter() { + seen.insert(a); + } + } + } + Sequence(ref ps) => { + for p in ps.iter() { + dotag(p, rep, map, seen) + } + } + Optional(ref ps) => { + for p in ps.iter() { + dotag(p, rep, map, seen) + } + } + Repeat(ref p) => dotag(&**p, true, map, seen), + PatAtom(ref atom) => { + let opt = map.find_mut(atom).expect("bug: no atom found"); + opt.repeats = opt.repeats || rep || seen.contains(atom); + seen.insert(atom.clone()); + } + } + } + let mut seen = HashSet::new(); + dotag(self, false, map, &mut seen); + } + + fn repeat(p: Pattern) -> Pattern { + match p { + // Normalize [p1 p2]... into the equivalent [p1... p2...]. + Optional(ps) => Optional(ps.into_iter().map(Pattern::repeat).collect()), + p @ Repeat(_) => p, + p => Repeat(Box::new(p)), + } + } +} + +impl Atom { + pub fn new(s: &str) -> Atom { + if Atom::is_short(s) { + Short(s[1..].chars().next().unwrap()) + } else if Atom::is_long(s) { + Long(s[2..].into()) + } else if Atom::is_arg(s) { + if s.starts_with("<") && s.ends_with(">") { + Positional(s[1..s.len()-1].into()) + } else { + Positional(s.into()) + } + } else if Atom::is_cmd(s) { + Command(s.into()) + } else { + panic!("Unknown atom string: '{}'", s) + } + } + + fn is_short(s: &str) -> bool { + lazy_static! { + static ref RE: Regex = regex!(r"^-[^-]\S*$"); + } + RE.is_match(s) + } + + fn is_long(s: &str) -> bool { + lazy_static! { + static ref RE: Regex = regex!(r"^--\S+(?:<[^>]+>)?$"); + } + RE.is_match(s) + } + + fn is_long_argv(s: &str) -> bool { + lazy_static! { + static ref RE: Regex = regex!(r"^--\S+(=.+)?$"); + } + RE.is_match(s) + } + + fn is_arg(s: &str) -> bool { + lazy_static! { + static ref RE: Regex = regex!(r"^(\p{Lu}+|<[^>]+>)$"); + } + RE.is_match(s) + } + + fn is_cmd(s: &str) -> bool { + lazy_static! 
{ + static ref RE: Regex = regex!(r"^(-|--|[^-]\S*)$"); + } + RE.is_match(s) + } + + // Assigns an integer to each variant of Atom. (For easier sorting.) + fn type_as_usize(&self) -> usize { + match *self { + Short(_) => 0, + Long(_) => 1, + Command(_) => 2, + Positional(_) => 3, + } + } +} + +impl PartialOrd for Atom { + fn partial_cmp(&self, other: &Atom) -> Option { + match (self, other) { + (&Short(c1), &Short(c2)) => c1.partial_cmp(&c2), + (&Long(ref s1), &Long(ref s2)) => s1.partial_cmp(s2), + (&Command(ref s1), &Command(ref s2)) => s1.partial_cmp(s2), + (&Positional(ref s1), &Positional(ref s2)) => s1.partial_cmp(s2), + (a1, a2) => a1.type_as_usize().partial_cmp(&a2.type_as_usize()), + } + } +} + +impl fmt::Display for Atom { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match *self { + Short(c) => write!(f, "-{}", c), + Long(ref s) => write!(f, "--{}", s), + Command(ref s) => write!(f, "{}", s), + Positional(ref s) => { + if s.chars().all(|c| c.is_uppercase()) { + write!(f, "{}", s) + } else { + write!(f, "<{}>", s) + } + } + } + } +} + + +impl Options { + fn new(rep: bool, arg: Argument) -> Options { + Options { repeats: rep, arg: arg, is_desc: false, } + } +} + +impl Argument { + fn has_arg(&self) -> bool { + match *self { + Zero => false, + One(_) => true, + } + } +} + +#[doc(hidden)] +pub struct Argv<'a> { + /// A representation of an argv string as an ordered list of tokens. + /// This contains only positional arguments and commands. + positional: Vec, + /// Same as positional, but contains short and long flags. + /// Each flag may have an argument string. + flags: Vec, + /// Counts the number of times each flag appears. + counts: HashMap, + + // State for parser. 
+ dopt: &'a Parser, + argv: Vec, + curi: usize, + options_first: bool, +} + +#[derive(Clone, Debug)] +struct ArgvToken { + atom: Atom, + arg: Option, +} + +impl<'a> Argv<'a> { + fn new(dopt: &'a Parser, argv: Vec, options_first: bool) + -> Result, String> { + let mut a = Argv { + positional: vec!(), + flags: vec!(), + counts: HashMap::new(), + dopt: dopt, + argv: argv.iter().cloned().collect(), + curi: 0, + options_first: options_first, + }; + try!(a.parse()); + for flag in &a.flags { + match a.counts.entry(flag.atom.clone()) { + Vacant(v) => { v.insert(1); } + Occupied(mut v) => { *v.get_mut() += 1; } + } + } + Ok(a) + } + + fn parse(&mut self) -> Result<(), String> { + let mut seen_double_dash = false; + while self.curi < self.argv.len() { + let do_flags = + !seen_double_dash + && (!self.options_first || self.positional.is_empty()); + + if do_flags && Atom::is_short(self.cur()) { + let stacked: String = self.cur()[1..].into(); + for (i, c) in stacked.chars().enumerate() { + let mut tok = ArgvToken { + atom: self.dopt.descs.resolve(&Short(c)), + arg: None, + }; + if !self.dopt.descs.contains_key(&tok.atom) { + err!("Unknown flag: '{}'", &tok.atom); + } + if !self.dopt.has_arg(&tok.atom) { + self.flags.push(tok); + } else { + let rest = &stacked[i+1..]; + tok.arg = Some( + if rest.is_empty() { + let arg = try!(self.next_arg(&tok.atom)); + arg.into() + } else { + rest.into() + } + ); + self.flags.push(tok); + // We've either produced an error or gobbled up the + // rest of these stacked short flags, so stop. 
+ break + } + } + } else if do_flags && Atom::is_long_argv(self.cur()) { + let (atom, mut arg) = parse_long_equal_argv(self.cur()); + let atom = self.dopt.descs.resolve(&atom); + if !self.dopt.descs.contains_key(&atom) { + return self.err_unknown_flag(&atom) + } + if arg.is_some() && !self.dopt.has_arg(&atom) { + err!("Flag '{}' cannot have an argument, but found '{}'.", + &atom, arg.as_ref().unwrap()) + } else if arg.is_none() && self.dopt.has_arg(&atom) { + try!(self.next_noeof(&*format!("argument for flag '{}'", + &atom))); + arg = Some(self.cur().into()); + } + self.flags.push(ArgvToken { atom: atom, arg: arg }); + } else { + if !seen_double_dash && self.cur() == "--" { + seen_double_dash = true; + } else { + // Yup, we *always* insert a positional argument, which + // means we completely neglect `Command` here. + // This is because we can't tell whether something is a + // `command` or not until we start pattern matching. + let tok = ArgvToken { + atom: Positional(self.cur().into()), + arg: None, + }; + self.positional.push(tok); + } + } + self.next() + } + Ok(()) + } + + fn err_unknown_flag(&self, atom: &Atom) -> Result<(), String> { + use std::usize::MAX; + let mut best = String::new(); + let flag = atom.to_string(); + let mut min = MAX; + + let mut possibles = Vec::new(); + + for (key, _) in self.dopt.descs.synonyms() { + possibles.push(key); + } + + for key in self.dopt.descs.keys() { + possibles.push(key); + } + + for key in &possibles { + match **key { + Long(_) | Command(_) => { + let name = key.to_string(); + let dist = levenshtein(&flag, &name); + if dist < 3 && dist < min { + min = dist; + best = name; + } + } + _ => {} + } + } + if best.is_empty() { + err!("Unknown flag: '{}'", &atom); + } else { + err!("Unknown flag: '{}'. 
Did you mean '{}'?", &atom, &best) + } + } + + fn cur(&self) -> &str { self.at(0) } + fn at(&self, i: usize) -> &str { + &*self.argv[self.curi + i] + } + fn next(&mut self) { + if self.curi < self.argv.len() { + self.curi += 1 + } + } + fn next_arg(&mut self, atom: &Atom) -> Result<&str, String> { + let expected = format!("argument for flag '{}'", atom); + try!(self.next_noeof(&*expected)); + Ok(self.cur()) + } + fn next_noeof(&mut self, expected: &str) -> Result<(), String> { + self.next(); + if self.curi == self.argv.len() { + err!("Expected {} but reached end of arguments.", expected) + } + Ok(()) + } +} + +impl<'a> fmt::Debug for Argv<'a> { + fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { + try!(writeln!(f, "Positional: {:?}", self.positional)); + try!(writeln!(f, "Flags: {:?}", self.flags)); + try!(writeln!(f, "Counts: {:?}", self.counts)); + Ok(()) + } +} + +struct Matcher<'a, 'b:'a> { + argv: &'a Argv<'b>, +} + +#[derive(Clone, Debug, PartialEq)] +struct MState { + argvi: usize, // index into Argv.positional + counts: HashMap, // flags remaining for pattern consumption + max_counts: HashMap, // optional flag appearances + vals: HashMap, +} + +impl MState { + fn fill_value(&mut self, key: Atom, rep: bool, arg: Option) + -> bool { + match (arg, rep) { + (None, false) => { + self.vals.insert(key, Switch(true)); + } + (Some(arg), false) => { + self.vals.insert(key, Plain(Some(arg))); + } + (None, true) => { + match self.vals.entry(key) { + Vacant(v) => { v.insert(Counted(1)); } + Occupied(mut v) => { + match *v.get_mut() { + Counted(ref mut c) => { *c += 1; } + _ => return false, + } + } + } + } + (Some(arg), true) => { + match self.vals.entry(key) { + Vacant(v) => { v.insert(List(vec!(arg))); } + Occupied(mut v) => { + match *v.get_mut() { + List(ref mut vs) => vs.push(arg), + _ => return false, + } + } + } + } + } + true + } + + fn add_value(&mut self, opts: &Options, + spec: &Atom, atom: &Atom, arg: &Option) -> bool { + 
assert!(opts.arg.has_arg() == arg.is_some(), + "'{:?}' should have an argument but doesn't", atom); + match *atom { + Short(_) | Long(_) => { + self.fill_value(spec.clone(), opts.repeats, arg.clone()) + } + Positional(ref v) => { + assert!(!opts.arg.has_arg()); + self.fill_value(spec.clone(), opts.repeats, Some(v.clone())) + } + Command(_) => { + assert!(!opts.arg.has_arg()); + self.fill_value(spec.clone(), opts.repeats, None) + } + } + } + + fn use_flag(&mut self, flag: &Atom) -> bool { + match self.max_counts.entry(flag.clone()) { + Vacant(v) => { v.insert(0); } + Occupied(_) => {} + } + match self.counts.entry(flag.clone()) { + Vacant(_) => { false } + Occupied(mut v) => { + let c = v.get_mut(); + if *c == 0 { + false + } else { + *c -= 1; + true + } + } + } + } + + fn use_optional_flag(&mut self, flag: &Atom) { + match self.max_counts.entry(flag.clone()) { + Vacant(v) => { v.insert(1); } + Occupied(mut v) => { *v.get_mut() += 1; } + } + } + + fn match_cmd_or_posarg(&mut self, spec: &Atom, argv: &ArgvToken) + -> Option { + match (spec, &argv.atom) { + (_, &Command(_)) => { + // This is impossible because the argv parser doesn't know + // how to produce `Command` values. + unreachable!() + } + (&Command(ref n1), &Positional(ref n2)) if n1 == n2 => { + // Coerce a positional to a command because the pattern + // demands it and the positional argument matches it. 
+ self.argvi += 1; + Some(ArgvToken { atom: spec.clone(), arg: None }) + } + (&Positional(_), _) => { + self.argvi += 1; + Some(argv.clone()) + } + _ => None, + } + } +} + +impl<'a, 'b> Matcher<'a, 'b> { + fn matches(argv: &'a Argv, pat: &Pattern) + -> Option> { + let m = Matcher { argv: argv }; + let init = MState { + argvi: 0, + counts: argv.counts.clone(), + max_counts: HashMap::new(), + vals: HashMap::new(), + }; + m.states(pat, &init) + .into_iter() + .filter(|s| m.state_consumed_all_argv(s)) + .filter(|s| m.state_has_valid_flags(s)) + .filter(|s| m.state_valid_num_flags(s)) + .collect::>() + .into_iter() + .next() + .map(|mut s| { + m.add_flag_values(&mut s); + m.add_default_values(&mut s); + + // Build a synonym map so that it's easier to look up values. + let mut synmap: SynonymMap = + s.vals.into_iter() + .map(|(k, v)| (k.to_string(), v)) + .collect(); + for (from, to) in argv.dopt.descs.synonyms() { + let (from, to) = (from.to_string(), to.to_string()); + if synmap.contains_key(&to) { + synmap.insert_synonym(from, to); + } + } + synmap + }) + } + + fn token_from(&self, state: &MState) -> Option<&ArgvToken> { + self.argv.positional.get(state.argvi) + } + + fn add_value(&self, state: &mut MState, + atom_spec: &Atom, atom: &Atom, arg: &Option) + -> bool { + let opts = self.argv.dopt.descs.get(atom_spec); + state.add_value(opts, atom_spec, atom, arg) + } + + fn add_flag_values(&self, state: &mut MState) { + for tok in &self.argv.flags { + self.add_value(state, &tok.atom, &tok.atom, &tok.arg); + } + } + + fn add_default_values(&self, state: &mut MState) { + lazy_static! 
{ + static ref SPLIT_SPACE: Regex = regex!(r"\s+"); + } + let vs = &mut state.vals; + for (a, opts) in self.argv.dopt.descs.iter() { + if vs.contains_key(a) { + continue + } + let atom = a.clone(); + match (opts.repeats, &opts.arg) { + (false, &Zero) => { + match *a { + Positional(_) => vs.insert(atom, Plain(None)), + _ => vs.insert(atom, Switch(false)), + }; + } + (true, &Zero) => { + match *a { + Positional(_) => vs.insert(atom, List(vec!())), + _ => vs.insert(atom, Counted(0)), + }; + } + (false, &One(None)) => { vs.insert(atom, Plain(None)); } + (true, &One(None)) => { vs.insert(atom, List(vec!())); } + (false, &One(Some(ref v))) => { + vs.insert(atom, Plain(Some(v.clone()))); + } + (true, &One(Some(ref v))) => { + let words = SPLIT_SPACE + .split(v) + .map(|s| s.to_owned()) + .collect(); + vs.insert(atom, List(words)); + } + } + } + } + + fn state_consumed_all_argv(&self, state: &MState) -> bool { + self.argv.positional.len() == state.argvi + } + + fn state_has_valid_flags(&self, state: &MState) -> bool { + self.argv.counts.keys().all(|flag| state.max_counts.contains_key(flag)) + } + + fn state_valid_num_flags(&self, state: &MState) -> bool { + state.counts.iter().all( + |(flag, count)| count <= &state.max_counts[flag]) + } + + fn states(&self, pat: &Pattern, init: &MState) -> Vec { + match *pat { + Alternates(ref ps) => { + let mut alt_states = vec!(); + for p in ps.iter() { + alt_states.extend(self.states(p, init).into_iter()); + } + alt_states + } + Sequence(ref ps) => { + let (mut states, mut next) = (vec!(), vec!()); + let mut iter = ps.iter(); + match iter.next() { + None => return vec!(init.clone()), + Some(p) => states.extend(self.states(p, init).into_iter()), + } + for p in iter { + for s in states.into_iter() { + next.extend(self.states(p, &s).into_iter()); + } + states = vec!(); + states.extend(next.into_iter()); + next = vec!(); + } + states + } + Optional(ref ps) => { + let mut base = init.clone(); + let mut noflags = vec!(); + for p in ps.iter() 
{ + match p { + // Prevent exponential growth in cases like [--flag...] + // See https://github.com/docopt/docopt.rs/issues/195 + &Repeat(ref b) => match &**b { + &PatAtom(ref a @ Short(_)) + | &PatAtom(ref a @ Long(_)) => { + let argv_count = self.argv.counts.get(a) + .map_or(0, |&x| x); + let max_count = base.max_counts.get(a) + .map_or(0, |&x| x); + if argv_count > max_count { + for _ in max_count..argv_count { + base.use_optional_flag(a); + } + } + } + _ => { + noflags.push(p); + } + }, + &PatAtom(ref a @ Short(_)) + | &PatAtom(ref a @ Long(_)) => { + let argv_count = self.argv.counts.get(a) + .map_or(0, |&x| x); + let max_count = base.max_counts.get(a) + .map_or(0, |&x| x); + if argv_count > max_count { + base.use_optional_flag(a); + } + } + other => { + noflags.push(other); + } + } + } + let mut states = vec!(); + self.all_option_states(&base, &mut states, &*noflags); + states + } + Repeat(ref p) => { match &**p { + &PatAtom(ref a @ Short(_)) + | &PatAtom(ref a @ Long(_)) => { + let mut bases = self.states(&**p, init); + for base in &mut bases { + let argv_count = self.argv.counts.get(a) + .map_or(0, |&x| x); + let max_count = base.max_counts.get(a) + .map_or(0, |&x| x); + if argv_count > max_count { + for _ in max_count..argv_count { + base.use_optional_flag(a); + } + } + } + bases + } + _ => { + let mut grouped_states = vec!(self.states(&**p, init)); + loop { + let mut nextss = vec!(); + for s in grouped_states.last().unwrap().iter() { + nextss.extend( + self.states(&**p, s) + .into_iter() + .filter(|snext| snext != s)); + } + if nextss.is_empty() { + break + } + grouped_states.push(nextss); + } + grouped_states + .into_iter() + .flat_map(|ss| ss.into_iter()) + .collect::>() + } + }} + PatAtom(ref atom) => { + let mut state = init.clone(); + match *atom { + Short(_) | Long(_) => { + if !state.use_flag(atom) { + return vec!() + } + } + Command(_) | Positional(_) => { + let tok = + match self.token_from(init) { + None => return vec!(), + Some(tok) => tok, + 
}; + let tok = + match state.match_cmd_or_posarg(atom, tok) { + None => return vec!(), + Some(tok) => tok, + }; + if !self.add_value(&mut state, atom, + &tok.atom, &tok.arg) { + return vec!() + } + } + } + vec!(state) + } + } + } + + fn all_option_states(&self, base: &MState, states: &mut Vec, + pats: &[&Pattern]) { + if pats.is_empty() { + states.push(base.clone()); + } else { + let (pat, rest) = (*pats.first().unwrap(), &pats[1..]); + for s in self.states(pat, base).into_iter() { + self.all_option_states(&s, states, rest); + } + // Order is important here! This must come after the loop above + // because we prefer presence over absence. The first state wins. + self.all_option_states(base, states, &pats[1..]); + } + } +} + +// Tries to parse a long flag of the form '--flag[=arg]' and returns a tuple +// with the flag atom and whether there is an argument or not. +// If '=arg' exists and 'arg' isn't a valid argument, an error is returned. +fn parse_long_equal(flag: &str) -> Result<(Atom, Argument), String> { + lazy_static! { + static ref LONG_EQUAL: Regex = regex!("^(?P[^=]+)=(?P.+)$"); + } + match LONG_EQUAL.captures(flag) { + None => Ok((Atom::new(flag), Zero)), + Some(cap) => { + let arg = cap_or_empty(&cap, "arg"); + if !Atom::is_arg(arg) { + err!("Argument '{}' for flag '{}' is not in the \ + form ARG or .", flag, arg) + } + Ok((Atom::new(cap_or_empty(&cap, "name")), One(None))) + } + } +} + +fn parse_long_equal_argv(flag: &str) -> (Atom, Option) { + lazy_static! { + static ref LONG_EQUAL: Regex = regex!("^(?P[^=]+)=(?P.*)$"); + } + match LONG_EQUAL.captures(flag) { + None => (Atom::new(flag), None), + Some(cap) => ( + Atom::new(cap_or_empty(&cap, "name")), + Some(cap_or_empty(&cap, "arg").to_string()), + ), + } +} + +// Tokenizes a usage pattern. +// Beware: regex hack ahead. Tokenizes based on whitespace separated words. +// It first normalizes `[xyz]` -> `[ xyz ]` so that delimiters are tokens. +// Similarly for `...`, `(`, `)` and `|`. 
+// One hitch: `--flag=` is allowed, so we use a regex to pick out +// words. +fn pattern_tokens(pat: &str) -> Vec { + lazy_static! { + static ref NORMALIZE: Regex = regex!(r"\.\.\.|\[|\]|\(|\)|\|"); + static ref WORDS: Regex = regex!(r"--\S+?=<[^>]+>|<[^>]+>|\S+"); + } + + let pat = NORMALIZE.replace_all(pat.trim(), " $0 "); + let mut words = vec!(); + for cap in WORDS.captures_iter(&*pat) { + words.push(cap[0].to_string()); + } + words +} diff -Nru cargo-0.17.0/vendor/docopt-0.7.0/src/synonym.rs cargo-0.19.0/vendor/docopt-0.7.0/src/synonym.rs --- cargo-0.17.0/vendor/docopt-0.7.0/src/synonym.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/docopt-0.7.0/src/synonym.rs 2017-08-16 09:07:20.000000000 +0000 @@ -0,0 +1,107 @@ +use std::collections::HashMap; +use std::collections::hash_map::{Iter, Keys}; +use std::fmt::Debug; +use std::hash::Hash; +use std::iter::{FromIterator, IntoIterator}; +use std::mem; + +#[derive(Clone)] +pub struct SynonymMap { + vals: HashMap, + syns: HashMap, +} + +impl SynonymMap { + pub fn new() -> SynonymMap { + SynonymMap { + vals: HashMap::new(), + syns: HashMap::new(), + } + } + + pub fn insert_synonym(&mut self, from: K, to: K) -> bool { + assert!(self.vals.contains_key(&to)); + self.syns.insert(from, to).is_none() + } + + pub fn keys(&self) -> Keys { + self.vals.keys() + } + + pub fn iter(&self) -> Iter { + self.vals.iter() + } + + pub fn synonyms(&self) -> Iter { + self.syns.iter() + } + + pub fn find(&self, k: &K) -> Option<&V> { + self.with_key(k, |k| self.vals.get(k)) + } + + pub fn contains_key(&self, k: &K) -> bool { + self.with_key(k, |k| self.vals.contains_key(k)) + } + + pub fn len(&self) -> usize { + self.vals.len() + } + + fn with_key(&self, k: &K, with: F) -> T where F: FnOnce(&K) -> T { + if self.syns.contains_key(k) { + with(&self.syns[k]) + } else { + with(k) + } + } +} + +impl SynonymMap { + pub fn resolve(&self, k: &K) -> K { + self.with_key(k, |k| k.clone()) + } + + pub fn get<'a>(&'a self, k: &K) -> &'a V 
{ + self.find(k).unwrap() + } + + pub fn find_mut<'a>(&'a mut self, k: &K) -> Option<&'a mut V> { + if self.syns.contains_key(k) { + self.vals.get_mut(&self.syns[k]) + } else { + self.vals.get_mut(k) + } + } + + pub fn swap(&mut self, k: K, mut new: V) -> Option { + if self.syns.contains_key(&k) { + let old = self.vals.get_mut(&k).unwrap(); + mem::swap(old, &mut new); + Some(new) + } else { + self.vals.insert(k, new) + } + } + + pub fn insert(&mut self, k: K, v: V) -> bool { + self.swap(k, v).is_none() + } +} + +impl FromIterator<(K, V)> for SynonymMap { + fn from_iter>(iter: T) -> SynonymMap { + let mut map = SynonymMap::new(); + for (k, v) in iter { + map.insert(k, v); + } + map + } +} + +impl Debug for SynonymMap { + fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result { + try!(self.vals.fmt(f)); + write!(f, " (synomyns: {:?})", self.syns) + } +} diff -Nru cargo-0.17.0/vendor/docopt-0.7.0/src/test/mod.rs cargo-0.19.0/vendor/docopt-0.7.0/src/test/mod.rs --- cargo-0.17.0/vendor/docopt-0.7.0/src/test/mod.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/docopt-0.7.0/src/test/mod.rs 2017-08-16 09:07:20.000000000 +0000 @@ -0,0 +1,111 @@ +use std::collections::HashMap; +use {Docopt, ArgvMap}; +use Value::{self, Switch, Plain}; + +fn get_args(doc: &str, argv: &[&'static str]) -> ArgvMap { + let dopt = match Docopt::new(doc) { + Err(err) => panic!("Invalid usage: {}", err), + Ok(dopt) => dopt, + }; + match dopt.argv(vec!["cmd"].iter().chain(argv.iter())).parse() { + Err(err) => panic!("{}", err), + Ok(vals) => vals, + } +} + +fn map_from_alist(alist: Vec<(&'static str, Value)>) + -> HashMap { + alist.into_iter().map(|(k, v)| (k.to_string(), v)).collect() +} + +fn same_args(expected: &HashMap, got: &ArgvMap) { + for (k, ve) in expected.iter() { + match got.map.find(k) { + None => panic!("EXPECTED has '{}' but GOT does not.", k), + Some(vg) => { + assert!(ve == vg, + "{}: EXPECTED = '{:?}' != '{:?}' = GOT", k, ve, vg) + } + } + } + for (k, vg) 
in got.map.iter() { + match got.map.find(k) { + None => panic!("GOT has '{}' but EXPECTED does not.", k), + Some(ve) => { + assert!(vg == ve, + "{}: GOT = '{:?}' != '{:?}' = EXPECTED", k, vg, ve) + } + } + } +} + +macro_rules! test_expect( + ($name:ident, $doc:expr, $args:expr, $expected:expr) => ( + #[test] + fn $name() { + let vals = get_args($doc, $args); + let expected = map_from_alist($expected); + same_args(&expected, &vals); + } + ); +); + +macro_rules! test_user_error( + ($name:ident, $doc:expr, $args:expr) => ( + #[test] + #[should_panic] + fn $name() { get_args($doc, $args); } + ); +); + +test_expect!(test_issue_13, "Usage: prog file ", &["file", "file"], + vec![("file", Switch(true)), + ("", Plain(Some("file".to_string())))]); + +test_expect!(test_issue_129, "Usage: prog [options] + +Options: + --foo ARG Foo foo.", + &["--foo=a b"], + vec![("--foo", Plain(Some("a b".into())))]); + +#[test] +fn regression_issue_12() { + const USAGE: &'static str = " + Usage: + whisper info + whisper update + whisper mark + "; + + #[derive(RustcDecodable, Debug)] + struct Args { + arg_file: String, + cmd_info: bool, + cmd_update: bool, + arg_timestamp: u64, + arg_value: f64 + } + + let dopt: Args = Docopt::new(USAGE).unwrap() + .argv(&["whisper", "mark", "./p/blah", "100"]) + .decode().unwrap(); + assert_eq!(dopt.arg_timestamp, 0); +} + +#[test] +fn regression_issue_195() { + const USAGE: &'static str = " + Usage: + slow [-abcdefghijklmnopqrs...] 
+ "; + + let argv = &["slow", "-abcdefghijklmnopqrs"]; + let dopt : Docopt = Docopt::new(USAGE).unwrap().argv(argv); + + dopt.parse().unwrap(); +} + + +mod testcases; +mod suggestions; diff -Nru cargo-0.17.0/vendor/docopt-0.7.0/src/test/suggestions.rs cargo-0.19.0/vendor/docopt-0.7.0/src/test/suggestions.rs --- cargo-0.17.0/vendor/docopt-0.7.0/src/test/suggestions.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/docopt-0.7.0/src/test/suggestions.rs 2017-08-16 09:07:20.000000000 +0000 @@ -0,0 +1,72 @@ +use {Docopt, Error}; + +fn get_suggestion(doc: &str, argv: &[&'static str]) -> Error { + let dopt = + match Docopt::new(doc) { + Err(err) => panic!("Invalid usage: {}", err), + Ok(dopt) => dopt, + }; + let mut argv: Vec<_> = argv.iter().map(|x| x.to_string()).collect(); + argv.insert(0, "prog".to_string()); + match dopt.argv(argv.into_iter()).parse() { + Err(err) => err, + Ok(_) => panic!("Should have been a user error"), + } +} + +macro_rules! test_suggest( + ($name:ident, $doc:expr, $args:expr, $expected:expr) => ( + #[test] + fn $name() { + let sg = get_suggestion($doc, $args); + println!("{}", sg); + match sg { + Error::WithProgramUsage(e, _) => { + match *e { + Error::Argv(msg) => { + println!("{:?}",msg); + assert_eq!(msg, $expected); + } + err => panic!("Error other than argv: {:?}", err) + } + }, + _ => panic!("Error without program usage") + } + } + ); +); + + +test_suggest!(test_suggest_1, "Usage: prog [--release]", &["--releas"], "Unknown flag: '--releas'. Did you mean '--release'?"); + +test_suggest!(test_suggest_2, +"Usage: prog [-a] + prog [-a] ... + prog [-e] + Options: + -a, --archive Copy everything. +", +&["-d"], "Unknown flag: '-d'"); + + +test_suggest!(test_suggest_3, +"Usage: prog [-a] + prog [-a] ... + prog [-e] + Options: + -a, --archive Copy everything. + -e, --export Export all the things. +", +&["--expotr"], "Unknown flag: '--expotr'. 
Did you mean '--export'?"); + + +test_suggest!(test_suggest_4, +"Usage: prog [--import] [--complete] +", +&["--mport", "--complte"], "Unknown flag: '--mport'. Did you mean '--import'?"); + +test_suggest!(test_suggest_5, +"Usage: prog [--import] [--complete] +", +&["--import", "--complte"], "Unknown flag: '--complte'. Did you mean '--complete'?"); + diff -Nru cargo-0.17.0/vendor/docopt-0.7.0/src/test/testcases.docopt cargo-0.19.0/vendor/docopt-0.7.0/src/test/testcases.docopt --- cargo-0.17.0/vendor/docopt-0.7.0/src/test/testcases.docopt 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/docopt-0.7.0/src/test/testcases.docopt 2017-08-16 09:07:20.000000000 +0000 @@ -0,0 +1,1122 @@ +r"""Usage: prog + +""" +$ prog +{} + +$ prog --xxx +"user-error" + + +r"""Usage: prog [options] + +Options: -a All. + +""" +$ prog +{"-a": false} + +$ prog -a +{"-a": true} + +$ prog -x +"user-error" + + +r"""Usage: prog [options] + +Options: --all All. + +""" +$ prog +{"--all": false} + +$ prog --all +{"--all": true} + +$ prog --xxx +"user-error" + + +r"""Usage: prog [options] + +Options: -v, --verbose Verbose. + +""" +$ prog --verbose +{"--verbose": true} + +$ prog --ver +"user-error" + +$ prog -v +{"--verbose": true} + + +r"""Usage: prog [options] + +Options: -p PATH + +""" +$ prog -p home/ +{"-p": "home/"} + +$ prog -phome/ +{"-p": "home/"} + +$ prog -p +"user-error" + + +r"""Usage: prog [options] + +Options: --path + +""" +$ prog --path home/ +{"--path": "home/"} + +$ prog --path=home/ +{"--path": "home/"} + +$ prog --pa home/ +"user-error" + +$ prog --pa=home/ +"user-error" + +$ prog --path +"user-error" + + +r"""Usage: prog [options] + +Options: -p PATH, --path= Path to files. + +""" +$ prog -proot +{"--path": "root"} + + +r"""Usage: prog [options] + +Options: -p --path PATH Path to files. 
+ +""" +$ prog -p root +{"--path": "root"} + +$ prog --path root +{"--path": "root"} + + +r"""Usage: prog [options] + +Options: + -p PATH Path to files [default: ./] + +""" +$ prog +{"-p": "./"} + +$ prog -phome +{"-p": "home"} + + +r"""UsAgE: prog [options] + +OpTiOnS: --path= Path to files + [dEfAuLt: /root] + +""" +$ prog +{"--path": "/root"} + +$ prog --path=home +{"--path": "home"} + + +r"""usage: prog [options] + +options: + -a Add + -r Remote + -m Message + +""" +$ prog -a -r -m Hello +{"-a": true, + "-r": true, + "-m": "Hello"} + +$ prog -armyourass +{"-a": true, + "-r": true, + "-m": "yourass"} + +$ prog -a -r +{"-a": true, + "-r": true, + "-m": null} + + +r"""Usage: prog [options] + +Options: --version + --verbose + +""" +$ prog --version +{"--version": true, + "--verbose": false} + +$ prog --verbose +{"--version": false, + "--verbose": true} + +$ prog --ver +"user-error" + +$ prog --verb +"user-error" + + +r"""usage: prog [-a -r -m ] + +options: + -a Add + -r Remote + -m Message + +""" +$ prog -armyourass +{"-a": true, + "-r": true, + "-m": "yourass"} + + +r"""usage: prog [-armMSG] + +options: -a Add + -r Remote + -m Message + +""" +$ prog -a -r -m Hello +{"-a": true, + "-r": true, + "-m": "Hello"} + + +r"""usage: prog -a -b + +options: + -a + -b + +""" +$ prog -a -b +{"-a": true, "-b": true} + +$ prog -b -a +{"-a": true, "-b": true} + +$ prog -a +"user-error" + +$ prog +"user-error" + + +r"""usage: prog (-a -b) + +options: -a + -b + +""" +$ prog -a -b +{"-a": true, "-b": true} + +$ prog -b -a +{"-a": true, "-b": true} + +$ prog -a +"user-error" + +$ prog +"user-error" + + +r"""usage: prog [-a] -b + +options: -a + -b + +""" +$ prog -a -b +{"-a": true, "-b": true} + +$ prog -b -a +{"-a": true, "-b": true} + +$ prog -a +"user-error" + +$ prog -b +{"-a": false, "-b": true} + +$ prog +"user-error" + + +r"""usage: prog [(-a -b)] + +options: -a + -b + +""" +$ prog -a -b +{"-a": true, "-b": true} + +$ prog -b -a +{"-a": true, "-b": true} + +$ prog -a 
+"user-error" + +$ prog -b +"user-error" + +$ prog +{"-a": false, "-b": false} + + +r"""usage: prog (-a|-b) + +options: -a + -b + +""" +$ prog -a -b +"user-error" + +$ prog +"user-error" + +$ prog -a +{"-a": true, "-b": false} + +$ prog -b +{"-a": false, "-b": true} + + +r"""usage: prog [ -a | -b ] + +options: -a + -b + +""" +$ prog -a -b +"user-error" + +$ prog +{"-a": false, "-b": false} + +$ prog -a +{"-a": true, "-b": false} + +$ prog -b +{"-a": false, "-b": true} + + +r"""usage: prog """ +$ prog 10 +{"": "10"} + +$ prog 10 20 +"user-error" + +$ prog +"user-error" + + +r"""usage: prog []""" +$ prog 10 +{"": "10"} + +$ prog 10 20 +"user-error" + +$ prog +{"": null} + + +r"""usage: prog """ +$ prog 10 20 40 +{"": "10", "": "20", "": "40"} + +$ prog 10 20 +"user-error" + +$ prog +"user-error" + + +r"""usage: prog [ ]""" +$ prog 10 20 40 +{"": "10", "": "20", "": "40"} + +$ prog 10 20 +{"": "10", "": "20", "": null} + +$ prog +"user-error" + + +r"""usage: prog [ | ]""" +$ prog 10 20 40 +"user-error" + +$ prog 20 40 +{"": null, "": "20", "": "40"} + +$ prog +{"": null, "": null, "": null} + + +r"""usage: prog ( --all | ) + +options: + --all + +""" +$ prog 10 --all +{"": "10", "--all": true, "": null} + +$ prog 10 +{"": null, "--all": false, "": "10"} + +$ prog +"user-error" + + +r"""usage: prog [ ]""" +$ prog 10 20 +{"": ["10", "20"]} + +$ prog 10 +{"": ["10"]} + +$ prog +{"": []} + + +r"""usage: prog [( )]""" +$ prog 10 20 +{"": ["10", "20"]} + +$ prog 10 +"user-error" + +$ prog +{"": []} + + +r"""usage: prog NAME...""" +$ prog 10 20 +{"NAME": ["10", "20"]} + +$ prog 10 +{"NAME": ["10"]} + +$ prog +"user-error" + + +r"""usage: prog [NAME]...""" +$ prog 10 20 +{"NAME": ["10", "20"]} + +$ prog 10 +{"NAME": ["10"]} + +$ prog +{"NAME": []} + + +r"""usage: prog [NAME...]""" +$ prog 10 20 +{"NAME": ["10", "20"]} + +$ prog 10 +{"NAME": ["10"]} + +$ prog +{"NAME": []} + + +r"""usage: prog [NAME [NAME ...]]""" +$ prog 10 20 +{"NAME": ["10", "20"]} + +$ prog 10 +{"NAME": 
["10"]} + +$ prog +{"NAME": []} + + +r"""usage: prog (NAME | --foo NAME) + +options: --foo + +""" +$ prog 10 +{"NAME": "10", "--foo": false} + +$ prog --foo 10 +{"NAME": "10", "--foo": true} + +$ prog --foo=10 +"user-error" + + +r"""usage: prog (NAME | --foo) [--bar | NAME] + +options: --foo +options: --bar + +""" +$ prog 10 +{"NAME": ["10"], "--foo": false, "--bar": false} + +$ prog 10 20 +{"NAME": ["10", "20"], "--foo": false, "--bar": false} + +$ prog --foo --bar +{"NAME": [], "--foo": true, "--bar": true} + + +r"""Naval Fate. + +Usage: + prog ship new ... + prog ship [] move [--speed=] + prog ship shoot + prog mine (set|remove) [--moored|--drifting] + prog -h | --help + prog --version + +Options: + -h --help Show this screen. + --version Show version. + --speed= Speed in knots [default: 10]. + --moored Mored (anchored) mine. + --drifting Drifting mine. + +""" +$ prog ship Guardian move 150 300 --speed=20 +{"--drifting": false, + "--help": false, + "--moored": false, + "--speed": "20", + "--version": false, + "": ["Guardian"], + "": "150", + "": "300", + "mine": false, + "move": true, + "new": false, + "remove": false, + "set": false, + "ship": true, + "shoot": false} + + +r"""usage: prog --hello""" +$ prog --hello +{"--hello": true} + + +r"""usage: prog [--hello=]""" +$ prog +{"--hello": null} + +$ prog --hello wrld +{"--hello": "wrld"} + + +r"""usage: prog [-o]""" +$ prog +{"-o": false} + +$ prog -o +{"-o": true} + + +r"""usage: prog [-opr]""" +$ prog -op +{"-o": true, "-p": true, "-r": false} + + +r"""usage: prog --aabb | --aa""" +$ prog --aa +{"--aabb": false, "--aa": true} + +$ prog --a +"user-error" # not a unique prefix + +# +# Counting number of flags +# + +r"""Usage: prog -v""" +$ prog -v +{"-v": true} + + +r"""Usage: prog [-v -v]""" +$ prog +{"-v": 0} + +$ prog -v +{"-v": 1} + +$ prog -vv +{"-v": 2} + + +r"""Usage: prog -v ...""" +$ prog +"user-error" + +$ prog -v +{"-v": 1} + +$ prog -vv +{"-v": 2} + +$ prog -vvvvvv +{"-v": 6} + + +r"""Usage: prog [-v 
| -vv | -vvv] + +This one is probably most readable user-friednly variant. + +""" +$ prog +{"-v": 0} + +$ prog -v +{"-v": 1} + +$ prog -vv +{"-v": 2} + +$ prog -vvvv +"user-error" + + +r"""usage: prog [--ver --ver]""" +$ prog --ver --ver +{"--ver": 2} + + +# +# Counting commands +# + +r"""usage: prog [go]""" +$ prog go +{"go": true} + + +r"""usage: prog [go go]""" +$ prog +{"go": 0} + +$ prog go +{"go": 1} + +$ prog go go +{"go": 2} + +$ prog go go go +"user-error" + +r"""usage: prog go...""" +$ prog go go go go go +{"go": 5} + +# +# [options] does not include options from usage-pattern +# +r"""usage: prog [options] [-a] + +options: -a + -b +""" +$ prog -a +{"-a": true, "-b": false} + +$ prog -aa +"user-error" + +# +# Test [options] shourtcut +# + +r"""Usage: prog [options] A + +Options: + -q Be quiet + -v Be verbose. + +""" +$ prog arg +{"A": "arg", "-v": false, "-q": false} + +$ prog -v arg +{"A": "arg", "-v": true, "-q": false} + +$ prog -q arg +{"A": "arg", "-v": false, "-q": true} + +# +# Test single dash +# + +r"""usage: prog [-]""" + +$ prog - +{"-": true} + +$ prog +{"-": false} + +# +# If argument is repeated, its value should always be a list +# + +r"""usage: prog [NAME [NAME ...]]""" + +$ prog a b +{"NAME": ["a", "b"]} + +$ prog +{"NAME": []} + +# +# Option's argument defaults to null/None +# + +r"""usage: prog [options] + +options: + -a Add + -m Message + +""" +$ prog -a +{"-m": null, "-a": true} + +# +# Test options without description +# + +r"""usage: prog --hello""" +$ prog --hello +{"--hello": true} + +r"""usage: prog [--hello=]""" +$ prog +{"--hello": null} + +$ prog --hello wrld +{"--hello": "wrld"} + +r"""usage: prog [-o]""" +$ prog +{"-o": false} + +$ prog -o +{"-o": true} + +r"""usage: prog [-opr]""" +$ prog -op +{"-o": true, "-p": true, "-r": false} + +r"""usage: git [-v | --verbose]""" +$ prog -v +{"-v": true, "--verbose": false} + +r"""usage: git remote [-v | --verbose]""" +$ prog remote -v +{"remote": true, "-v": true, "--verbose": false} + 
+# +# Test empty usage pattern +# + +r"""usage: prog""" +$ prog +{} + +r"""usage: prog + prog +""" +$ prog 1 2 +{"": "1", "": "2"} + +$ prog +{"": null, "": null} + +r"""usage: prog + prog +""" +$ prog +{"": null, "": null} + +# +# Option's argument should not capture default value from usage pattern +# + +r"""usage: prog [--file=]""" +$ prog +{"--file": null} + +r"""usage: prog [--file=] + +options: --file + +""" +$ prog +{"--file": null} + +r"""Usage: prog [-a ] + +Options: -a, --address TCP address [default: localhost:6283]. + +""" +$ prog +{"--address": "localhost:6283"} + +# +# If option with argument could be repeated, +# its arguments should be accumulated into a list +# + +r"""usage: prog --long= ...""" + +$ prog --long one +{"--long": ["one"]} + +$ prog --long one --long two +{"--long": ["one", "two"]} + +# +# Test multiple elements repeated at once +# + +r"""usage: prog (go --speed=)...""" +$ prog go left --speed=5 go right --speed=9 +{"go": 2, "": ["left", "right"], "--speed": ["5", "9"]} + +# +# Required options should work with option shortcut +# + +r"""usage: prog [options] -a + +options: -a + +""" +$ prog -a +{"-a": true} + +# +# If option could be repeated its defaults should be split into a list +# + +r"""usage: prog [-o ]... + +options: -o [default: x] + +""" +$ prog -o this -o that +{"-o": ["this", "that"]} + +$ prog +{"-o": ["x"]} + +r"""usage: prog [-o ]... 
+ +options: -o [default: x y] + +""" +$ prog -o this +{"-o": ["this"]} + +$ prog +{"-o": ["x", "y"]} + +# +# Test stacked option's argument +# + +r"""usage: prog -pPATH + +options: -p PATH + +""" +$ prog -pHOME +{"-p": "HOME"} + +# +# Issue 56: Repeated mutually exclusive args give nested lists sometimes +# + +r"""Usage: foo (--xx=X|--yy=Y)...""" +$ prog --xx=1 --yy=2 +{"--xx": ["1"], "--yy": ["2"]} + +# +# POSIXly correct tokenization +# + +r"""usage: prog []""" +$ prog f.txt +{"": "f.txt"} + +r"""usage: prog [--input=]...""" +$ prog --input a.txt --input=b.txt +{"--input": ["a.txt", "b.txt"]} + +# +# Issue 85: `[options]` shourtcut with multiple subcommands +# + +r"""usage: prog good [options] + prog fail [options] + +options: --loglevel=N + +""" +$ prog fail --loglevel 5 +{"--loglevel": "5", "fail": true, "good": false} + +# +# Usage-section syntax +# + +r"""usage:prog --foo""" +$ prog --foo +{"--foo": true} + +r"""PROGRAM USAGE: prog --foo""" +$ prog --foo +{"--foo": true} + +r"""Usage: prog --foo + prog --bar +NOT PART OF SECTION""" +$ prog --foo +{"--foo": true, "--bar": false} + +r"""Usage: + prog --foo + prog --bar + +NOT PART OF SECTION""" +$ prog --foo +{"--foo": true, "--bar": false} + +r"""Usage: + prog --foo + prog --bar +NOT PART OF SECTION""" +$ prog --foo +{"--foo": true, "--bar": false} + +# +# Options-section syntax +# + +r"""Usage: prog [options] + +global options: --foo +local options: --baz + --bar +other options: + --egg + --spam +-not-an-option- + +""" +$ prog --bar --egg +{"--bar": true, "--egg": true, "--spam": false} + +r"""Usage: prog [-a] [--] [...]""" +$ program -a +{"-a": true, "": []} + +r"""Usage: prog [-a] [--] [...]""" +$ program -- +{"-a": false, "": []} + +r"""Usage: prog [-a] [--] [...]""" +$ program -a -- -b +{"-a": true, "": ["-b"]} + +r"""Usage: prog [-a] [--] [...]""" +$ program -a -- -a +{"-a": true, "": ["-a"]} + +r"""Usage: prog [-a] [--] [...]""" +$ program -- -a +{"-a": false, "": ["-a"]} + +r"""Usage: prog test 
[options] [--] [...]""" +$ program test a -- -b +{"": ["a", "-b"]} + +r"""Usage: prog test [options] [--] [...]""" +$ program test -- -b +{"": ["-b"]} + +r"""Usage: prog test [options] [--] [...]""" +$ program test a -b +"user-error" + +r"""Usage: prog test [options] [--] [...]""" +$ program test -- -b -- +{"": ["-b", "--"]} + +r"""Usage: prog [options] + +Options: + -a ... Foo +""" +$ program +{"-a": 0} +$ program -a +{"-a": 1} +$ program -a -a +{"-a": 2} +$ program -aa +{"-a": 2} +$ program -a -a -a +{"-a": 3} +$ program -aaa +{"-a": 3} + +r"""Usage: prog [options] + +Options: + -a, --all ... Foo +""" +$ program +{"-a": 0} +$ program -a +{"-a": 1} +$ program -a --all +{"-a": 2} +$ program -aa --all +{"-a": 3} +$ program --all +{"-a": 1} +$ program --all --all +{"-a": 2} + +r"""Usage: prog [options] + +Options: + -a, --all ARG ... Foo +""" +$ program +{"-a": []} +$ program -a 1 +{"-a": ["1"]} +$ program -a 2 --all 3 +{"-a": ["2", "3"]} +$ program -a4 -a5 --all 6 +{"-a": ["4", "5", "6"]} +$ program --all 7 +{"-a": ["7"]} +$ program --all 8 --all 9 +{"-a": ["8", "9"]} + +r"""Usage: prog [options] + +Options: + --all ... Foo +""" +$ program +{"--all": 0} +$ program --all +{"--all": 1} +$ program --all --all +{"--all": 2} + +r"""Usage: prog [options] + +Options: + --all=ARG ... Foo +""" +$ program +{"--all": []} +$ program --all 1 +{"--all": ["1"]} +$ program --all 2 --all 3 +{"--all": ["2", "3"]} + +r"""Usage: prog [options] + +Options: + --all ... Foo +""" +$ program --all --all +"user-error" + +r"""Usage: prog [options] + +Options: + --all ARG ... 
Foo +""" +$ program --all foo --all bar +"user-error" + +r"""Usage: prog --speed=ARG""" +$ program --speed 20 +{"--speed": "20"} +$ program --speed=20 +{"--speed": "20"} +$ program --speed=-20 +{"--speed": "-20"} +$ program --speed -20 +{"--speed": "-20"} + +# +# Issue 187: Fails to parse a default value containing ']' +# + +r"""usage: prog [--datetime=] + +options: --datetime= Regex for datetimes [default: ^[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}] + +""" +$ prog +{"--datetime": "^[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}"} + +# +# Issue 137: -x-y being seen as a positional argument +# + +r"""Usage: prog [options] + +Options: + -x ARG + -y""" +$ prog -x-y +{"-x": "-y"} diff -Nru cargo-0.17.0/vendor/docopt-0.7.0/src/test/testcases.rs cargo-0.19.0/vendor/docopt-0.7.0/src/test/testcases.rs --- cargo-0.17.0/vendor/docopt-0.7.0/src/test/testcases.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/docopt-0.7.0/src/test/testcases.rs 2017-08-16 09:07:20.000000000 +0000 @@ -0,0 +1,801 @@ +// !!! ATTENTION !!! +// This file is automatically generated by `scripts/mk-testcases`. +// Please do not edit this file directly! 
+ +use Value::{Switch, Counted, Plain, List}; +use test::{get_args, map_from_alist, same_args}; + +test_expect!(test_0_testcases, "Usage: prog", &[], vec!()); + +test_user_error!(test_1_testcases, "Usage: prog", &["--xxx"]); + +test_expect!(test_2_testcases, "Usage: prog [options] + +Options: -a All.", &[], vec!(("-a", Switch(false)))); + +test_expect!(test_3_testcases, "Usage: prog [options] + +Options: -a All.", &["-a"], vec!(("-a", Switch(true)))); + +test_user_error!(test_4_testcases, "Usage: prog [options] + +Options: -a All.", &["-x"]); + +test_expect!(test_5_testcases, "Usage: prog [options] + +Options: --all All.", &[], vec!(("--all", Switch(false)))); + +test_expect!(test_6_testcases, "Usage: prog [options] + +Options: --all All.", &["--all"], vec!(("--all", Switch(true)))); + +test_user_error!(test_7_testcases, "Usage: prog [options] + +Options: --all All.", &["--xxx"]); + +test_expect!(test_8_testcases, "Usage: prog [options] + +Options: -v, --verbose Verbose.", &["--verbose"], vec!(("--verbose", Switch(true)))); + +test_user_error!(test_9_testcases, "Usage: prog [options] + +Options: -v, --verbose Verbose.", &["--ver"]); + +test_expect!(test_10_testcases, "Usage: prog [options] + +Options: -v, --verbose Verbose.", &["-v"], vec!(("--verbose", Switch(true)))); + +test_expect!(test_11_testcases, "Usage: prog [options] + +Options: -p PATH", &["-p", "home/"], vec!(("-p", Plain(Some("home/".to_string()))))); + +test_expect!(test_12_testcases, "Usage: prog [options] + +Options: -p PATH", &["-phome/"], vec!(("-p", Plain(Some("home/".to_string()))))); + +test_user_error!(test_13_testcases, "Usage: prog [options] + +Options: -p PATH", &["-p"]); + +test_expect!(test_14_testcases, "Usage: prog [options] + +Options: --path ", &["--path", "home/"], vec!(("--path", Plain(Some("home/".to_string()))))); + +test_expect!(test_15_testcases, "Usage: prog [options] + +Options: --path ", &["--path=home/"], vec!(("--path", Plain(Some("home/".to_string()))))); + 
+test_user_error!(test_16_testcases, "Usage: prog [options] + +Options: --path ", &["--pa", "home/"]); + +test_user_error!(test_17_testcases, "Usage: prog [options] + +Options: --path ", &["--pa=home/"]); + +test_user_error!(test_18_testcases, "Usage: prog [options] + +Options: --path ", &["--path"]); + +test_expect!(test_19_testcases, "Usage: prog [options] + +Options: -p PATH, --path= Path to files.", &["-proot"], vec!(("--path", Plain(Some("root".to_string()))))); + +test_expect!(test_20_testcases, "Usage: prog [options] + +Options: -p --path PATH Path to files.", &["-p", "root"], vec!(("--path", Plain(Some("root".to_string()))))); + +test_expect!(test_21_testcases, "Usage: prog [options] + +Options: -p --path PATH Path to files.", &["--path", "root"], vec!(("--path", Plain(Some("root".to_string()))))); + +test_expect!(test_22_testcases, "Usage: prog [options] + +Options: + -p PATH Path to files [default: ./]", &[], vec!(("-p", Plain(Some("./".to_string()))))); + +test_expect!(test_23_testcases, "Usage: prog [options] + +Options: + -p PATH Path to files [default: ./]", &["-phome"], vec!(("-p", Plain(Some("home".to_string()))))); + +test_expect!(test_24_testcases, "UsAgE: prog [options] + +OpTiOnS: --path= Path to files + [dEfAuLt: /root]", &[], vec!(("--path", Plain(Some("/root".to_string()))))); + +test_expect!(test_25_testcases, "UsAgE: prog [options] + +OpTiOnS: --path= Path to files + [dEfAuLt: /root]", &["--path=home"], vec!(("--path", Plain(Some("home".to_string()))))); + +test_expect!(test_26_testcases, "usage: prog [options] + +options: + -a Add + -r Remote + -m Message", &["-a", "-r", "-m", "Hello"], vec!(("-m", Plain(Some("Hello".to_string()))), ("-a", Switch(true)), ("-r", Switch(true)))); + +test_expect!(test_27_testcases, "usage: prog [options] + +options: + -a Add + -r Remote + -m Message", &["-armyourass"], vec!(("-m", Plain(Some("yourass".to_string()))), ("-a", Switch(true)), ("-r", Switch(true)))); + +test_expect!(test_28_testcases, "usage: prog 
[options] + +options: + -a Add + -r Remote + -m Message", &["-a", "-r"], vec!(("-m", Plain(None)), ("-a", Switch(true)), ("-r", Switch(true)))); + +test_expect!(test_29_testcases, "Usage: prog [options] + +Options: --version + --verbose", &["--version"], vec!(("--verbose", Switch(false)), ("--version", Switch(true)))); + +test_expect!(test_30_testcases, "Usage: prog [options] + +Options: --version + --verbose", &["--verbose"], vec!(("--verbose", Switch(true)), ("--version", Switch(false)))); + +test_user_error!(test_31_testcases, "Usage: prog [options] + +Options: --version + --verbose", &["--ver"]); + +test_user_error!(test_32_testcases, "Usage: prog [options] + +Options: --version + --verbose", &["--verb"]); + +test_expect!(test_33_testcases, "usage: prog [-a -r -m ] + +options: + -a Add + -r Remote + -m Message", &["-armyourass"], vec!(("-m", Plain(Some("yourass".to_string()))), ("-a", Switch(true)), ("-r", Switch(true)))); + +test_expect!(test_34_testcases, "usage: prog [-armMSG] + +options: -a Add + -r Remote + -m Message", &["-a", "-r", "-m", "Hello"], vec!(("-m", Plain(Some("Hello".to_string()))), ("-a", Switch(true)), ("-r", Switch(true)))); + +test_expect!(test_35_testcases, "usage: prog -a -b + +options: + -a + -b", &["-a", "-b"], vec!(("-a", Switch(true)), ("-b", Switch(true)))); + +test_expect!(test_36_testcases, "usage: prog -a -b + +options: + -a + -b", &["-b", "-a"], vec!(("-a", Switch(true)), ("-b", Switch(true)))); + +test_user_error!(test_37_testcases, "usage: prog -a -b + +options: + -a + -b", &["-a"]); + +test_user_error!(test_38_testcases, "usage: prog -a -b + +options: + -a + -b", &[]); + +test_expect!(test_39_testcases, "usage: prog (-a -b) + +options: -a + -b", &["-a", "-b"], vec!(("-a", Switch(true)), ("-b", Switch(true)))); + +test_expect!(test_40_testcases, "usage: prog (-a -b) + +options: -a + -b", &["-b", "-a"], vec!(("-a", Switch(true)), ("-b", Switch(true)))); + +test_user_error!(test_41_testcases, "usage: prog (-a -b) + +options: -a 
+ -b", &["-a"]); + +test_user_error!(test_42_testcases, "usage: prog (-a -b) + +options: -a + -b", &[]); + +test_expect!(test_43_testcases, "usage: prog [-a] -b + +options: -a + -b", &["-a", "-b"], vec!(("-a", Switch(true)), ("-b", Switch(true)))); + +test_expect!(test_44_testcases, "usage: prog [-a] -b + +options: -a + -b", &["-b", "-a"], vec!(("-a", Switch(true)), ("-b", Switch(true)))); + +test_user_error!(test_45_testcases, "usage: prog [-a] -b + +options: -a + -b", &["-a"]); + +test_expect!(test_46_testcases, "usage: prog [-a] -b + +options: -a + -b", &["-b"], vec!(("-a", Switch(false)), ("-b", Switch(true)))); + +test_user_error!(test_47_testcases, "usage: prog [-a] -b + +options: -a + -b", &[]); + +test_expect!(test_48_testcases, "usage: prog [(-a -b)] + +options: -a + -b", &["-a", "-b"], vec!(("-a", Switch(true)), ("-b", Switch(true)))); + +test_expect!(test_49_testcases, "usage: prog [(-a -b)] + +options: -a + -b", &["-b", "-a"], vec!(("-a", Switch(true)), ("-b", Switch(true)))); + +test_user_error!(test_50_testcases, "usage: prog [(-a -b)] + +options: -a + -b", &["-a"]); + +test_user_error!(test_51_testcases, "usage: prog [(-a -b)] + +options: -a + -b", &["-b"]); + +test_expect!(test_52_testcases, "usage: prog [(-a -b)] + +options: -a + -b", &[], vec!(("-a", Switch(false)), ("-b", Switch(false)))); + +test_user_error!(test_53_testcases, "usage: prog (-a|-b) + +options: -a + -b", &["-a", "-b"]); + +test_user_error!(test_54_testcases, "usage: prog (-a|-b) + +options: -a + -b", &[]); + +test_expect!(test_55_testcases, "usage: prog (-a|-b) + +options: -a + -b", &["-a"], vec!(("-a", Switch(true)), ("-b", Switch(false)))); + +test_expect!(test_56_testcases, "usage: prog (-a|-b) + +options: -a + -b", &["-b"], vec!(("-a", Switch(false)), ("-b", Switch(true)))); + +test_user_error!(test_57_testcases, "usage: prog [ -a | -b ] + +options: -a + -b", &["-a", "-b"]); + +test_expect!(test_58_testcases, "usage: prog [ -a | -b ] + +options: -a + -b", &[], vec!(("-a", 
Switch(false)), ("-b", Switch(false)))); + +test_expect!(test_59_testcases, "usage: prog [ -a | -b ] + +options: -a + -b", &["-a"], vec!(("-a", Switch(true)), ("-b", Switch(false)))); + +test_expect!(test_60_testcases, "usage: prog [ -a | -b ] + +options: -a + -b", &["-b"], vec!(("-a", Switch(false)), ("-b", Switch(true)))); + +test_expect!(test_61_testcases, "usage: prog ", &["10"], vec!(("", Plain(Some("10".to_string()))))); + +test_user_error!(test_62_testcases, "usage: prog ", &["10", "20"]); + +test_user_error!(test_63_testcases, "usage: prog ", &[]); + +test_expect!(test_64_testcases, "usage: prog []", &["10"], vec!(("", Plain(Some("10".to_string()))))); + +test_user_error!(test_65_testcases, "usage: prog []", &["10", "20"]); + +test_expect!(test_66_testcases, "usage: prog []", &[], vec!(("", Plain(None)))); + +test_expect!(test_67_testcases, "usage: prog ", &["10", "20", "40"], vec!(("", Plain(Some("40".to_string()))), ("", Plain(Some("10".to_string()))), ("", Plain(Some("20".to_string()))))); + +test_user_error!(test_68_testcases, "usage: prog ", &["10", "20"]); + +test_user_error!(test_69_testcases, "usage: prog ", &[]); + +test_expect!(test_70_testcases, "usage: prog [ ]", &["10", "20", "40"], vec!(("", Plain(Some("40".to_string()))), ("", Plain(Some("10".to_string()))), ("", Plain(Some("20".to_string()))))); + +test_expect!(test_71_testcases, "usage: prog [ ]", &["10", "20"], vec!(("", Plain(None)), ("", Plain(Some("10".to_string()))), ("", Plain(Some("20".to_string()))))); + +test_user_error!(test_72_testcases, "usage: prog [ ]", &[]); + +test_user_error!(test_73_testcases, "usage: prog [ | ]", &["10", "20", "40"]); + +test_expect!(test_74_testcases, "usage: prog [ | ]", &["20", "40"], vec!(("", Plain(Some("40".to_string()))), ("", Plain(None)), ("", Plain(Some("20".to_string()))))); + +test_expect!(test_75_testcases, "usage: prog [ | ]", &[], vec!(("", Plain(None)), ("", Plain(None)), ("", Plain(None)))); + +test_expect!(test_76_testcases, "usage: prog 
( --all | ) + +options: + --all", &["10", "--all"], vec!(("--all", Switch(true)), ("", Plain(Some("10".to_string()))), ("", Plain(None)))); + +test_expect!(test_77_testcases, "usage: prog ( --all | ) + +options: + --all", &["10"], vec!(("--all", Switch(false)), ("", Plain(None)), ("", Plain(Some("10".to_string()))))); + +test_user_error!(test_78_testcases, "usage: prog ( --all | ) + +options: + --all", &[]); + +test_expect!(test_79_testcases, "usage: prog [ ]", &["10", "20"], vec!(("", List(vec!("10".to_string(), "20".to_string()))))); + +test_expect!(test_80_testcases, "usage: prog [ ]", &["10"], vec!(("", List(vec!("10".to_string()))))); + +test_expect!(test_81_testcases, "usage: prog [ ]", &[], vec!(("", List(vec!())))); + +test_expect!(test_82_testcases, "usage: prog [( )]", &["10", "20"], vec!(("", List(vec!("10".to_string(), "20".to_string()))))); + +test_user_error!(test_83_testcases, "usage: prog [( )]", &["10"]); + +test_expect!(test_84_testcases, "usage: prog [( )]", &[], vec!(("", List(vec!())))); + +test_expect!(test_85_testcases, "usage: prog NAME...", &["10", "20"], vec!(("NAME", List(vec!("10".to_string(), "20".to_string()))))); + +test_expect!(test_86_testcases, "usage: prog NAME...", &["10"], vec!(("NAME", List(vec!("10".to_string()))))); + +test_user_error!(test_87_testcases, "usage: prog NAME...", &[]); + +test_expect!(test_88_testcases, "usage: prog [NAME]...", &["10", "20"], vec!(("NAME", List(vec!("10".to_string(), "20".to_string()))))); + +test_expect!(test_89_testcases, "usage: prog [NAME]...", &["10"], vec!(("NAME", List(vec!("10".to_string()))))); + +test_expect!(test_90_testcases, "usage: prog [NAME]...", &[], vec!(("NAME", List(vec!())))); + +test_expect!(test_91_testcases, "usage: prog [NAME...]", &["10", "20"], vec!(("NAME", List(vec!("10".to_string(), "20".to_string()))))); + +test_expect!(test_92_testcases, "usage: prog [NAME...]", &["10"], vec!(("NAME", List(vec!("10".to_string()))))); + +test_expect!(test_93_testcases, "usage: prog 
[NAME...]", &[], vec!(("NAME", List(vec!())))); + +test_expect!(test_94_testcases, "usage: prog [NAME [NAME ...]]", &["10", "20"], vec!(("NAME", List(vec!("10".to_string(), "20".to_string()))))); + +test_expect!(test_95_testcases, "usage: prog [NAME [NAME ...]]", &["10"], vec!(("NAME", List(vec!("10".to_string()))))); + +test_expect!(test_96_testcases, "usage: prog [NAME [NAME ...]]", &[], vec!(("NAME", List(vec!())))); + +test_expect!(test_97_testcases, "usage: prog (NAME | --foo NAME) + +options: --foo", &["10"], vec!(("NAME", Plain(Some("10".to_string()))), ("--foo", Switch(false)))); + +test_expect!(test_98_testcases, "usage: prog (NAME | --foo NAME) + +options: --foo", &["--foo", "10"], vec!(("NAME", Plain(Some("10".to_string()))), ("--foo", Switch(true)))); + +test_user_error!(test_99_testcases, "usage: prog (NAME | --foo NAME) + +options: --foo", &["--foo=10"]); + +test_expect!(test_100_testcases, "usage: prog (NAME | --foo) [--bar | NAME] + +options: --foo +options: --bar", &["10"], vec!(("--bar", Switch(false)), ("NAME", List(vec!("10".to_string()))), ("--foo", Switch(false)))); + +test_expect!(test_101_testcases, "usage: prog (NAME | --foo) [--bar | NAME] + +options: --foo +options: --bar", &["10", "20"], vec!(("--bar", Switch(false)), ("NAME", List(vec!("10".to_string(), "20".to_string()))), ("--foo", Switch(false)))); + +test_expect!(test_102_testcases, "usage: prog (NAME | --foo) [--bar | NAME] + +options: --foo +options: --bar", &["--foo", "--bar"], vec!(("--bar", Switch(true)), ("NAME", List(vec!())), ("--foo", Switch(true)))); + +test_expect!(test_103_testcases, "Naval Fate. + +Usage: + prog ship new ... + prog ship [] move [--speed=] + prog ship shoot + prog mine (set|remove) [--moored|--drifting] + prog -h | --help + prog --version + +Options: + -h --help Show this screen. + --version Show version. + --speed= Speed in knots [default: 10]. + --moored Mored (anchored) mine. 
+ --drifting Drifting mine.", &["ship", "Guardian", "move", "150", "300", "--speed=20"], vec!(("shoot", Switch(false)), ("--moored", Switch(false)), ("--drifting", Switch(false)), ("move", Switch(true)), ("--speed", Plain(Some("20".to_string()))), ("mine", Switch(false)), ("new", Switch(false)), ("--version", Switch(false)), ("set", Switch(false)), ("remove", Switch(false)), ("", List(vec!("Guardian".to_string()))), ("ship", Switch(true)), ("", Plain(Some("150".to_string()))), ("", Plain(Some("300".to_string()))), ("--help", Switch(false)))); + +test_expect!(test_104_testcases, "usage: prog --hello", &["--hello"], vec!(("--hello", Switch(true)))); + +test_expect!(test_105_testcases, "usage: prog [--hello=]", &[], vec!(("--hello", Plain(None)))); + +test_expect!(test_106_testcases, "usage: prog [--hello=]", &["--hello", "wrld"], vec!(("--hello", Plain(Some("wrld".to_string()))))); + +test_expect!(test_107_testcases, "usage: prog [-o]", &[], vec!(("-o", Switch(false)))); + +test_expect!(test_108_testcases, "usage: prog [-o]", &["-o"], vec!(("-o", Switch(true)))); + +test_expect!(test_109_testcases, "usage: prog [-opr]", &["-op"], vec!(("-o", Switch(true)), ("-p", Switch(true)), ("-r", Switch(false)))); + +test_expect!(test_110_testcases, "usage: prog --aabb | --aa", &["--aa"], vec!(("--aa", Switch(true)), ("--aabb", Switch(false)))); + +test_user_error!(test_111_testcases, "usage: prog --aabb | --aa", &["--a"]); + +test_expect!(test_112_testcases, "Usage: prog -v", &["-v"], vec!(("-v", Switch(true)))); + +test_expect!(test_113_testcases, "Usage: prog [-v -v]", &[], vec!(("-v", Counted(0)))); + +test_expect!(test_114_testcases, "Usage: prog [-v -v]", &["-v"], vec!(("-v", Counted(1)))); + +test_expect!(test_115_testcases, "Usage: prog [-v -v]", &["-vv"], vec!(("-v", Counted(2)))); + +test_user_error!(test_116_testcases, "Usage: prog -v ...", &[]); + +test_expect!(test_117_testcases, "Usage: prog -v ...", &["-v"], vec!(("-v", Counted(1)))); + 
+test_expect!(test_118_testcases, "Usage: prog -v ...", &["-vv"], vec!(("-v", Counted(2)))); + +test_expect!(test_119_testcases, "Usage: prog -v ...", &["-vvvvvv"], vec!(("-v", Counted(6)))); + +test_expect!(test_120_testcases, "Usage: prog [-v | -vv | -vvv] + +This one is probably most readable user-friednly variant.", &[], vec!(("-v", Counted(0)))); + +test_expect!(test_121_testcases, "Usage: prog [-v | -vv | -vvv] + +This one is probably most readable user-friednly variant.", &["-v"], vec!(("-v", Counted(1)))); + +test_expect!(test_122_testcases, "Usage: prog [-v | -vv | -vvv] + +This one is probably most readable user-friednly variant.", &["-vv"], vec!(("-v", Counted(2)))); + +test_user_error!(test_123_testcases, "Usage: prog [-v | -vv | -vvv] + +This one is probably most readable user-friednly variant.", &["-vvvv"]); + +test_expect!(test_124_testcases, "usage: prog [--ver --ver]", &["--ver", "--ver"], vec!(("--ver", Counted(2)))); + +test_expect!(test_125_testcases, "usage: prog [go]", &["go"], vec!(("go", Switch(true)))); + +test_expect!(test_126_testcases, "usage: prog [go go]", &[], vec!(("go", Counted(0)))); + +test_expect!(test_127_testcases, "usage: prog [go go]", &["go"], vec!(("go", Counted(1)))); + +test_expect!(test_128_testcases, "usage: prog [go go]", &["go", "go"], vec!(("go", Counted(2)))); + +test_user_error!(test_129_testcases, "usage: prog [go go]", &["go", "go", "go"]); + +test_expect!(test_130_testcases, "usage: prog go...", &["go", "go", "go", "go", "go"], vec!(("go", Counted(5)))); + +test_expect!(test_131_testcases, "usage: prog [options] [-a] + +options: -a + -b", &["-a"], vec!(("-a", Switch(true)), ("-b", Switch(false)))); + +test_user_error!(test_132_testcases, "usage: prog [options] [-a] + +options: -a + -b", &["-aa"]); + +test_expect!(test_133_testcases, "Usage: prog [options] A + +Options: + -q Be quiet + -v Be verbose.", &["arg"], vec!(("A", Plain(Some("arg".to_string()))), ("-v", Switch(false)), ("-q", Switch(false)))); + 
+test_expect!(test_134_testcases, "Usage: prog [options] A + +Options: + -q Be quiet + -v Be verbose.", &["-v", "arg"], vec!(("A", Plain(Some("arg".to_string()))), ("-v", Switch(true)), ("-q", Switch(false)))); + +test_expect!(test_135_testcases, "Usage: prog [options] A + +Options: + -q Be quiet + -v Be verbose.", &["-q", "arg"], vec!(("A", Plain(Some("arg".to_string()))), ("-v", Switch(false)), ("-q", Switch(true)))); + +test_expect!(test_136_testcases, "usage: prog [-]", &["-"], vec!(("-", Switch(true)))); + +test_expect!(test_137_testcases, "usage: prog [-]", &[], vec!(("-", Switch(false)))); + +test_expect!(test_138_testcases, "usage: prog [NAME [NAME ...]]", &["a", "b"], vec!(("NAME", List(vec!("a".to_string(), "b".to_string()))))); + +test_expect!(test_139_testcases, "usage: prog [NAME [NAME ...]]", &[], vec!(("NAME", List(vec!())))); + +test_expect!(test_140_testcases, "usage: prog [options] + +options: + -a Add + -m Message", &["-a"], vec!(("-m", Plain(None)), ("-a", Switch(true)))); + +test_expect!(test_141_testcases, "usage: prog --hello", &["--hello"], vec!(("--hello", Switch(true)))); + +test_expect!(test_142_testcases, "usage: prog [--hello=]", &[], vec!(("--hello", Plain(None)))); + +test_expect!(test_143_testcases, "usage: prog [--hello=]", &["--hello", "wrld"], vec!(("--hello", Plain(Some("wrld".to_string()))))); + +test_expect!(test_144_testcases, "usage: prog [-o]", &[], vec!(("-o", Switch(false)))); + +test_expect!(test_145_testcases, "usage: prog [-o]", &["-o"], vec!(("-o", Switch(true)))); + +test_expect!(test_146_testcases, "usage: prog [-opr]", &["-op"], vec!(("-o", Switch(true)), ("-p", Switch(true)), ("-r", Switch(false)))); + +test_expect!(test_147_testcases, "usage: git [-v | --verbose]", &["-v"], vec!(("-v", Switch(true)), ("--verbose", Switch(false)))); + +test_expect!(test_148_testcases, "usage: git remote [-v | --verbose]", &["remote", "-v"], vec!(("-v", Switch(true)), ("remote", Switch(true)), ("--verbose", Switch(false)))); + 
+test_expect!(test_149_testcases, "usage: prog", &[], vec!()); + +test_expect!(test_150_testcases, "usage: prog + prog ", &["1", "2"], vec!(("", Plain(Some("1".to_string()))), ("", Plain(Some("2".to_string()))))); + +test_expect!(test_151_testcases, "usage: prog + prog ", &[], vec!(("", Plain(None)), ("", Plain(None)))); + +test_expect!(test_152_testcases, "usage: prog + prog", &[], vec!(("", Plain(None)), ("", Plain(None)))); + +test_expect!(test_153_testcases, "usage: prog [--file=]", &[], vec!(("--file", Plain(None)))); + +test_expect!(test_154_testcases, "usage: prog [--file=] + +options: --file ", &[], vec!(("--file", Plain(None)))); + +test_expect!(test_155_testcases, "Usage: prog [-a ] + +Options: -a, --address TCP address [default: localhost:6283].", &[], vec!(("--address", Plain(Some("localhost:6283".to_string()))))); + +test_expect!(test_156_testcases, "usage: prog --long= ...", &["--long", "one"], vec!(("--long", List(vec!("one".to_string()))))); + +test_expect!(test_157_testcases, "usage: prog --long= ...", &["--long", "one", "--long", "two"], vec!(("--long", List(vec!("one".to_string(), "two".to_string()))))); + +test_expect!(test_158_testcases, "usage: prog (go --speed=)...", &["go", "left", "--speed=5", "go", "right", "--speed=9"], vec!(("go", Counted(2)), ("", List(vec!("left".to_string(), "right".to_string()))), ("--speed", List(vec!("5".to_string(), "9".to_string()))))); + +test_expect!(test_159_testcases, "usage: prog [options] -a + +options: -a", &["-a"], vec!(("-a", Switch(true)))); + +test_expect!(test_160_testcases, "usage: prog [-o ]... + +options: -o [default: x]", &["-o", "this", "-o", "that"], vec!(("-o", List(vec!("this".to_string(), "that".to_string()))))); + +test_expect!(test_161_testcases, "usage: prog [-o ]... + +options: -o [default: x]", &[], vec!(("-o", List(vec!("x".to_string()))))); + +test_expect!(test_162_testcases, "usage: prog [-o ]... 
+ +options: -o [default: x y]", &["-o", "this"], vec!(("-o", List(vec!("this".to_string()))))); + +test_expect!(test_163_testcases, "usage: prog [-o ]... + +options: -o [default: x y]", &[], vec!(("-o", List(vec!("x".to_string(), "y".to_string()))))); + +test_expect!(test_164_testcases, "usage: prog -pPATH + +options: -p PATH", &["-pHOME"], vec!(("-p", Plain(Some("HOME".to_string()))))); + +test_expect!(test_165_testcases, "Usage: foo (--xx=X|--yy=Y)...", &["--xx=1", "--yy=2"], vec!(("--yy", List(vec!("2".to_string()))), ("--xx", List(vec!("1".to_string()))))); + +test_expect!(test_166_testcases, "usage: prog []", &["f.txt"], vec!(("", Plain(Some("f.txt".to_string()))))); + +test_expect!(test_167_testcases, "usage: prog [--input=]...", &["--input", "a.txt", "--input=b.txt"], vec!(("--input", List(vec!("a.txt".to_string(), "b.txt".to_string()))))); + +test_expect!(test_168_testcases, "usage: prog good [options] + prog fail [options] + +options: --loglevel=N", &["fail", "--loglevel", "5"], vec!(("fail", Switch(true)), ("good", Switch(false)), ("--loglevel", Plain(Some("5".to_string()))))); + +test_expect!(test_169_testcases, "usage:prog --foo", &["--foo"], vec!(("--foo", Switch(true)))); + +test_expect!(test_170_testcases, "PROGRAM USAGE: prog --foo", &["--foo"], vec!(("--foo", Switch(true)))); + +test_expect!(test_171_testcases, "Usage: prog --foo + prog --bar +NOT PART OF SECTION", &["--foo"], vec!(("--bar", Switch(false)), ("--foo", Switch(true)))); + +test_expect!(test_172_testcases, "Usage: + prog --foo + prog --bar + +NOT PART OF SECTION", &["--foo"], vec!(("--bar", Switch(false)), ("--foo", Switch(true)))); + +test_expect!(test_173_testcases, "Usage: + prog --foo + prog --bar +NOT PART OF SECTION", &["--foo"], vec!(("--bar", Switch(false)), ("--foo", Switch(true)))); + +test_expect!(test_174_testcases, "Usage: prog [options] + +global options: --foo +local options: --baz + --bar +other options: + --egg + --spam +-not-an-option-", &["--bar", "--egg"], 
vec!(("--bar", Switch(true)), ("--egg", Switch(true)), ("--spam", Switch(false)))); + +test_expect!(test_175_testcases, "Usage: prog [-a] [--] [...]", &["-a"], vec!(("", List(vec!())), ("-a", Switch(true)))); + +test_expect!(test_176_testcases, "Usage: prog [-a] [--] [...]", &["--"], vec!(("", List(vec!())), ("-a", Switch(false)))); + +test_expect!(test_177_testcases, "Usage: prog [-a] [--] [...]", &["-a", "--", "-b"], vec!(("", List(vec!("-b".to_string()))), ("-a", Switch(true)))); + +test_expect!(test_178_testcases, "Usage: prog [-a] [--] [...]", &["-a", "--", "-a"], vec!(("", List(vec!("-a".to_string()))), ("-a", Switch(true)))); + +test_expect!(test_179_testcases, "Usage: prog [-a] [--] [...]", &["--", "-a"], vec!(("", List(vec!("-a".to_string()))), ("-a", Switch(false)))); + +test_expect!(test_180_testcases, "Usage: prog test [options] [--] [...]", &["test", "a", "--", "-b"], vec!(("", List(vec!("a".to_string(), "-b".to_string()))))); + +test_expect!(test_181_testcases, "Usage: prog test [options] [--] [...]", &["test", "--", "-b"], vec!(("", List(vec!("-b".to_string()))))); + +test_user_error!(test_182_testcases, "Usage: prog test [options] [--] [...]", &["test", "a", "-b"]); + +test_expect!(test_183_testcases, "Usage: prog test [options] [--] [...]", &["test", "--", "-b", "--"], vec!(("", List(vec!("-b".to_string(), "--".to_string()))))); + +test_expect!(test_184_testcases, "Usage: prog [options] + +Options: + -a ... Foo", &[], vec!(("-a", Counted(0)))); + +test_expect!(test_185_testcases, "Usage: prog [options] + +Options: + -a ... Foo", &["-a"], vec!(("-a", Counted(1)))); + +test_expect!(test_186_testcases, "Usage: prog [options] + +Options: + -a ... Foo", &["-a", "-a"], vec!(("-a", Counted(2)))); + +test_expect!(test_187_testcases, "Usage: prog [options] + +Options: + -a ... Foo", &["-aa"], vec!(("-a", Counted(2)))); + +test_expect!(test_188_testcases, "Usage: prog [options] + +Options: + -a ... 
Foo", &["-a", "-a", "-a"], vec!(("-a", Counted(3)))); + +test_expect!(test_189_testcases, "Usage: prog [options] + +Options: + -a ... Foo", &["-aaa"], vec!(("-a", Counted(3)))); + +test_expect!(test_190_testcases, "Usage: prog [options] + +Options: + -a, --all ... Foo", &[], vec!(("-a", Counted(0)))); + +test_expect!(test_191_testcases, "Usage: prog [options] + +Options: + -a, --all ... Foo", &["-a"], vec!(("-a", Counted(1)))); + +test_expect!(test_192_testcases, "Usage: prog [options] + +Options: + -a, --all ... Foo", &["-a", "--all"], vec!(("-a", Counted(2)))); + +test_expect!(test_193_testcases, "Usage: prog [options] + +Options: + -a, --all ... Foo", &["-aa", "--all"], vec!(("-a", Counted(3)))); + +test_expect!(test_194_testcases, "Usage: prog [options] + +Options: + -a, --all ... Foo", &["--all"], vec!(("-a", Counted(1)))); + +test_expect!(test_195_testcases, "Usage: prog [options] + +Options: + -a, --all ... Foo", &["--all", "--all"], vec!(("-a", Counted(2)))); + +test_expect!(test_196_testcases, "Usage: prog [options] + +Options: + -a, --all ARG ... Foo", &[], vec!(("-a", List(vec!())))); + +test_expect!(test_197_testcases, "Usage: prog [options] + +Options: + -a, --all ARG ... Foo", &["-a", "1"], vec!(("-a", List(vec!("1".to_string()))))); + +test_expect!(test_198_testcases, "Usage: prog [options] + +Options: + -a, --all ARG ... Foo", &["-a", "2", "--all", "3"], vec!(("-a", List(vec!("2".to_string(), "3".to_string()))))); + +test_expect!(test_199_testcases, "Usage: prog [options] + +Options: + -a, --all ARG ... Foo", &["-a4", "-a5", "--all", "6"], vec!(("-a", List(vec!("4".to_string(), "5".to_string(), "6".to_string()))))); + +test_expect!(test_200_testcases, "Usage: prog [options] + +Options: + -a, --all ARG ... Foo", &["--all", "7"], vec!(("-a", List(vec!("7".to_string()))))); + +test_expect!(test_201_testcases, "Usage: prog [options] + +Options: + -a, --all ARG ... 
Foo", &["--all", "8", "--all", "9"], vec!(("-a", List(vec!("8".to_string(), "9".to_string()))))); + +test_expect!(test_202_testcases, "Usage: prog [options] + +Options: + --all ... Foo", &[], vec!(("--all", Counted(0)))); + +test_expect!(test_203_testcases, "Usage: prog [options] + +Options: + --all ... Foo", &["--all"], vec!(("--all", Counted(1)))); + +test_expect!(test_204_testcases, "Usage: prog [options] + +Options: + --all ... Foo", &["--all", "--all"], vec!(("--all", Counted(2)))); + +test_expect!(test_205_testcases, "Usage: prog [options] + +Options: + --all=ARG ... Foo", &[], vec!(("--all", List(vec!())))); + +test_expect!(test_206_testcases, "Usage: prog [options] + +Options: + --all=ARG ... Foo", &["--all", "1"], vec!(("--all", List(vec!("1".to_string()))))); + +test_expect!(test_207_testcases, "Usage: prog [options] + +Options: + --all=ARG ... Foo", &["--all", "2", "--all", "3"], vec!(("--all", List(vec!("2".to_string(), "3".to_string()))))); + +test_user_error!(test_208_testcases, "Usage: prog [options] + +Options: + --all ... Foo", &["--all", "--all"]); + +test_user_error!(test_209_testcases, "Usage: prog [options] + +Options: + --all ARG ... 
Foo", &["--all", "foo", "--all", "bar"]); + +test_expect!(test_210_testcases, "Usage: prog --speed=ARG", &["--speed", "20"], vec!(("--speed", Plain(Some("20".to_string()))))); + +test_expect!(test_211_testcases, "Usage: prog --speed=ARG", &["--speed=20"], vec!(("--speed", Plain(Some("20".to_string()))))); + +test_expect!(test_212_testcases, "Usage: prog --speed=ARG", &["--speed=-20"], vec!(("--speed", Plain(Some("-20".to_string()))))); + +test_expect!(test_213_testcases, "Usage: prog --speed=ARG", &["--speed", "-20"], vec!(("--speed", Plain(Some("-20".to_string()))))); + +test_expect!(test_214_testcases, "usage: prog [--datetime=] + +options: --datetime= Regex for datetimes [default: ^[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}]", &[], vec!(("--datetime", Plain(Some("^[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}".to_string()))))); + +test_expect!(test_215_testcases, "Usage: prog [options] + +Options: + -x ARG + -y", &["-x-y"], vec!(("-x", Plain(Some("-y".to_string()))))); + diff -Nru cargo-0.17.0/vendor/docopt-0.7.0/src/wordlist.rs cargo-0.19.0/vendor/docopt-0.7.0/src/wordlist.rs --- cargo-0.17.0/vendor/docopt-0.7.0/src/wordlist.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/docopt-0.7.0/src/wordlist.rs 2017-08-16 09:07:20.000000000 +0000 @@ -0,0 +1,110 @@ +#[macro_use] +extern crate lazy_static; +extern crate regex; +extern crate rustc_serialize; +extern crate strsim; + +use std::collections::HashMap; +use std::io::{self, Read, Write}; + +use dopt::Docopt; +use parse::{Atom, Parser}; + +// cheat until we get syntax extensions back :-( +macro_rules! regex( + ($s:expr) => (::regex::Regex::new($s).unwrap()); +); + +macro_rules! 
werr( + ($($arg:tt)*) => ({ + use std::io::{Write, stderr}; + write!(&mut stderr(), $($arg)*).unwrap(); + }) +); + +fn cap_or_empty<'t>(caps: ®ex::Captures<'t>, name: &str) -> &'t str { + caps.name(name).map_or("", |m| m.as_str()) +} + +#[allow(dead_code)] +mod dopt; +#[allow(dead_code)] +mod parse; +#[allow(dead_code)] +mod synonym; + +const USAGE: &'static str = " +Usage: docopt-wordlist [( )] ... + +docopt-wordlist prints a list of available flags and commands arguments for the +given usage (provided on stdin). + +Example use: + + your-command --help | docopt-wordlist + +This command also supports completing positional arguments when given a list of +choices. The choices are included in the word list if and only if the argument +name appears in the usage string. For example: + + your-command --help | docopt-wordlist 'arg' 'a b c' + +Which will only include 'a', 'b' and 'c' in the wordlist if +'your-command --help' contains a positional argument named 'arg'. +"; + +#[derive(Debug, RustcDecodable)] +struct Args { + arg_name: Vec, + arg_possibles: Vec, +} + +fn main() { + let args: Args = Docopt::new(USAGE) + .and_then(|d| d.decode()) + .unwrap_or_else(|e| e.exit()); + match run(args) { + Ok(_) => {}, + Err(err) => { + write!(&mut io::stderr(), "{}", err).unwrap(); + ::std::process::exit(1) + } + } +} + +fn run(args: Args) -> Result<(), String> { + let mut usage = String::new(); + try!(io::stdin().read_to_string(&mut usage).map_err(|e| e.to_string())); + let parsed = try!(Parser::new(&usage).map_err(|e| e.to_string())); + let arg_possibles: HashMap> = + args.arg_name.iter() + .zip(args.arg_possibles.iter()) + .map(|(name, possibles)| { + let choices = + regex!(r"[ \t]+").split(&**possibles) + .map(|s| s.to_string()) + .collect::>(); + (name.clone(), choices) + }) + .collect(); + + let mut words = vec![]; + for k in parsed.descs.keys() { + if let Atom::Positional(ref arg_name) = *k { + if let Some(choices) = arg_possibles.get(arg_name) { + 
words.extend(choices.iter().map(|s| s.clone())); + } + // If the user hasn't given choices for this positional argument, + // then there's really nothing to complete here. + } else { + words.push(k.to_string()); + } + } + for (k, _) in parsed.descs.synonyms() { + // We don't need to do anything special here since synonyms can + // only be flags, which we always include in the wordlist. + words.push(k.to_string()); + } + println!("{}", words.join(" ")); + Ok(()) +} diff -Nru cargo-0.17.0/vendor/docopt-0.7.0/.travis.yml cargo-0.19.0/vendor/docopt-0.7.0/.travis.yml --- cargo-0.17.0/vendor/docopt-0.7.0/.travis.yml 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/docopt-0.7.0/.travis.yml 2017-08-16 09:07:20.000000000 +0000 @@ -0,0 +1,15 @@ +language: rust +rust: + - 1.12.0 + - stable + - beta + - nightly +script: + - cargo build --verbose + - cargo test --verbose + - cargo doc + - if [ "$TRAVIS_RUST_VERSION" = "nightly" ]; then + cd docopt_macros; + cargo build --verbose; + cargo test --verbose; + fi diff -Nru cargo-0.17.0/vendor/docopt-0.7.0/UNLICENSE cargo-0.19.0/vendor/docopt-0.7.0/UNLICENSE --- cargo-0.17.0/vendor/docopt-0.7.0/UNLICENSE 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/docopt-0.7.0/UNLICENSE 2017-08-16 09:07:20.000000000 +0000 @@ -0,0 +1,24 @@ +This is free and unencumbered software released into the public domain. + +Anyone is free to copy, modify, publish, use, compile, sell, or +distribute this software, either in source code form or as a compiled +binary, for any purpose, commercial or non-commercial, and by any +means. + +In jurisdictions that recognize copyright laws, the author or authors +of this software dedicate any and all copyright interest in the +software to the public domain. We make this dedication for the benefit +of the public at large and to the detriment of our heirs and +successors. 
We intend this dedication to be an overt act of +relinquishment in perpetuity of all present and future rights to this +software under copyright law. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR +OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, +ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +OTHER DEALINGS IN THE SOFTWARE. + +For more information, please refer to diff -Nru cargo-0.17.0/vendor/dtoa-0.4.1/benches/bench.rs cargo-0.19.0/vendor/dtoa-0.4.1/benches/bench.rs --- cargo-0.17.0/vendor/dtoa-0.4.1/benches/bench.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/dtoa-0.4.1/benches/bench.rs 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,54 @@ +#![feature(test)] + +extern crate dtoa; +extern crate test; + +macro_rules! 
benches { + ($($name:ident($value:expr),)*) => { + mod bench_dtoa { + use test::{Bencher, black_box}; + $( + #[bench] + fn $name(b: &mut Bencher) { + use dtoa; + + let mut buf = Vec::with_capacity(20); + + b.iter(|| { + buf.clear(); + dtoa::write(&mut buf, black_box($value)).unwrap() + }); + } + )* + } + + mod bench_fmt { + use test::{Bencher, black_box}; + $( + #[bench] + fn $name(b: &mut Bencher) { + use std::io::Write; + + let mut buf = Vec::with_capacity(20); + + b.iter(|| { + buf.clear(); + write!(&mut buf, "{}", black_box($value)).unwrap() + }); + } + )* + } + } +} + +benches!( + bench_0_f64(0f64), + bench_short_f64(0.1234f64), + bench_e_f64(2.718281828459045f64), + bench_max_f64(::std::f64::MAX), + + bench_0_f32(0f32), + bench_short_f32(0.1234f32), + bench_e_f32(2.718281828459045f32), + bench_max_f32(::std::f32::MAX), +); diff -Nru cargo-0.17.0/vendor/dtoa-0.4.1/.cargo-checksum.json cargo-0.19.0/vendor/dtoa-0.4.1/.cargo-checksum.json --- cargo-0.17.0/vendor/dtoa-0.4.1/.cargo-checksum.json 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/dtoa-0.4.1/.cargo-checksum.json 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1 @@ 
+{"files":{".cargo-ok":"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",".gitignore":"f9b1ca6ae27d1c18215265024629a8960c31379f206d9ed20f64e0b2dcf79805",".travis.yml":"a2b867b2e28af9bde20a669a6ff0f366ecc5150b89314cd7ec97ed95bb427547","Cargo.toml":"5755612ec9d7adc4ec1a68e3b096bfa45af8ae7dfd8237515c9f85786c9a9356","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"e18259ab3aa7f39a194795bdad8039b3c5fd544f6dd922526c9326c44842b76d","README.md":"8de1d7a3224bfae275197dc75d128f73e2970c26f6790b575a8346074f7783c6","benches/bench.rs":"ac713ab4e1c668dea70416504955563fcd6bd2982ae1cfa3a1c0043e09dd893f","performance.png":"5909ebd7b98691502c6f019c126758da40edc7031b9da32bce45df34273b1b87","src/diyfp.rs":"81754c3d1b8ff2347a506187ef43a666f09e20ae0e53436226c969d7e3f737dc","src/dtoa.rs":"f5cdd96d6ac9d3c50289a090a6d6801d36cb121c2a5e6d8acd1aa41013fded76","src/lib.rs":"037eaaf26de236c916332fb76bc72b7a8d588df8c90a8dab5636140976559adb","tests/test.rs":"296f3c322e08508fd372e436434fdd209bb911cab2124ea654d5f78d90f3eeea"},"package":"80c8b71fd71146990a9742fc06dcbbde19161a267e0ad4e572c35162f4578c90"} \ No newline at end of file diff -Nru cargo-0.17.0/vendor/dtoa-0.4.1/Cargo.toml cargo-0.19.0/vendor/dtoa-0.4.1/Cargo.toml --- cargo-0.17.0/vendor/dtoa-0.4.1/Cargo.toml 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/dtoa-0.4.1/Cargo.toml 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,9 @@ +[package] +name = "dtoa" +version = "0.4.1" +authors = ["David Tolnay "] +license = "MIT/Apache-2.0" +description = "Fast functions for printing floating-point primitives to an io::Write" +repository = "https://github.com/dtolnay/dtoa" +documentation = "https://github.com/dtolnay/dtoa" +categories = ["value-formatting"] diff -Nru cargo-0.17.0/vendor/dtoa-0.4.1/.gitignore cargo-0.19.0/vendor/dtoa-0.4.1/.gitignore --- cargo-0.17.0/vendor/dtoa-0.4.1/.gitignore 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/dtoa-0.4.1/.gitignore 
2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,2 @@ +target +Cargo.lock diff -Nru cargo-0.17.0/vendor/dtoa-0.4.1/LICENSE-APACHE cargo-0.19.0/vendor/dtoa-0.4.1/LICENSE-APACHE --- cargo-0.17.0/vendor/dtoa-0.4.1/LICENSE-APACHE 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/dtoa-0.4.1/LICENSE-APACHE 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + +2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + +4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ +Copyright [yyyy] [name of copyright owner] + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff -Nru cargo-0.17.0/vendor/dtoa-0.4.1/LICENSE-MIT cargo-0.19.0/vendor/dtoa-0.4.1/LICENSE-MIT --- cargo-0.17.0/vendor/dtoa-0.4.1/LICENSE-MIT 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/dtoa-0.4.1/LICENSE-MIT 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,25 @@ +Copyright (c) 2016 Itoa Developers + +Permission is hereby granted, free of charge, to any +person obtaining a copy of this software and associated +documentation files (the "Software"), to deal in the +Software without restriction, including without +limitation the rights to use, copy, modify, merge, +publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software +is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice +shall be included in all copies or substantial portions +of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF +ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED +TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A +PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR +IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. 
Binary files /tmp/tmplxiqkI/U7kx02RAH_/cargo-0.17.0/vendor/dtoa-0.4.1/performance.png and /tmp/tmplxiqkI/L4nBRNMLnv/cargo-0.19.0/vendor/dtoa-0.4.1/performance.png differ diff -Nru cargo-0.17.0/vendor/dtoa-0.4.1/README.md cargo-0.19.0/vendor/dtoa-0.4.1/README.md --- cargo-0.17.0/vendor/dtoa-0.4.1/README.md 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/dtoa-0.4.1/README.md 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,69 @@ +dtoa +==== + +[![Build Status](https://api.travis-ci.org/dtolnay/dtoa.svg?branch=master)](https://travis-ci.org/dtolnay/dtoa) +[![Latest Version](https://img.shields.io/crates/v/dtoa.svg)](https://crates.io/crates/dtoa) + +This crate provides fast functions for printing floating-point primitives to an +[`io::Write`](https://doc.rust-lang.org/std/io/trait.Write.html). The +implementation is a straightforward Rust port of [Milo +Yip](https://github.com/miloyip)'s C++ implementation +[dtoa.h](https://github.com/miloyip/rapidjson/blob/master/include/rapidjson/internal/dtoa.h). +The original C++ code of each function is included in comments. + +See also [`itoa`](https://github.com/dtolnay/itoa) for printing integer +primitives. + +## Performance + +![performance](https://raw.githubusercontent.com/dtolnay/dtoa/master/performance.png) + +## Functions + +```rust +extern crate dtoa; + +// write to a vector or other io::Write +let mut buf = Vec::new(); +dtoa::write(&mut buf, 2.71828f64)?; +println!("{:?}", buf); + +// write to a stack buffer +let mut bytes = [b'\0'; 20]; +let n = dtoa::write(&mut bytes[..], 2.71828f64)?; +println!("{:?}", &bytes[..n]); +``` + +The function signature is: + +```rust +fn write(writer: W, value: V) -> io::Result<()> +``` + +where `dtoa::Floating` is implemented for `f32` and `f64`. The return value +gives the number of bytes written. + +## Dependency + +Dtoa is available on [crates.io](https://crates.io/crates/dtoa). 
Use the +following in `Cargo.toml`: + +```toml +[dependencies] +dtoa = "0.4" +``` + +## License + +Licensed under either of + + * Apache License, Version 2.0 ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0) + * MIT license ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT) + +at your option. + +### Contribution + +Unless you explicitly state otherwise, any contribution intentionally submitted +for inclusion in dtoa by you, as defined in the Apache-2.0 license, shall be +dual licensed as above, without any additional terms or conditions. diff -Nru cargo-0.17.0/vendor/dtoa-0.4.1/src/diyfp.rs cargo-0.19.0/vendor/dtoa-0.4.1/src/diyfp.rs --- cargo-0.17.0/vendor/dtoa-0.4.1/src/diyfp.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/dtoa-0.4.1/src/diyfp.rs 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,232 @@ +// Copyright 2016 Dtoa Developers +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +use std::ops; + +#[derive(Copy, Clone, Debug)] +pub struct DiyFp { + pub f: F, + pub e: E, +} + +impl DiyFp { + pub fn new(f: F, e: E) -> Self { + DiyFp { f: f, e: e } + } +} + +impl ops::Mul for DiyFp { + type Output = Self; + fn mul(self, rhs: Self) -> Self { + let mut tmp = self.f as u64 * rhs.f as u64; + tmp += 1u64 << 31; // mult_round + DiyFp { + f: (tmp >> 32) as u32, + e: self.e + rhs.e + 32, + } + } +} + +impl ops::Mul for DiyFp { + type Output = Self; + fn mul(self, rhs: Self) -> Self { + let m32 = 0xFFFFFFFFu64; + let a = self.f >> 32; + let b = self.f & m32; + let c = rhs.f >> 32; + let d = rhs.f & m32; + let ac = a * c; + let bc = b * c; + let ad = a * d; + let bd = b * d; + let mut tmp = (bd >> 32) + (ad & m32) + (bc & m32); + tmp += 1u64 << 31; // mult_round + DiyFp { + f: ac + (ad >> 32) + (bc >> 32) + (tmp >> 32), + e: self.e + rhs.e + 64, + } + } +} + +#[macro_export] +macro_rules! diyfp {( + floating_type: $fty:ty, + significand_type: $sigty:ty, + exponent_type: $expty:ty, + + diy_significand_size: $diy_significand_size:expr, + significand_size: $significand_size:expr, + exponent_bias: $exponent_bias:expr, + mask_type: $mask_type:ty, + exponent_mask: $exponent_mask:expr, + significand_mask: $significand_mask:expr, + hidden_bit: $hidden_bit:expr, + cached_powers_f: $cached_powers_f:expr, + cached_powers_e: $cached_powers_e:expr, + min_power: $min_power:expr, +) => { + +type DiyFp = diyfp::DiyFp<$sigty, $expty>; + +impl DiyFp { + // Preconditions: + // `d` must have a positive sign and must not be infinity or NaN. 
+ /* + explicit DiyFp(double d) { + union { + double d; + uint64_t u64; + } u = { d }; + + int biased_e = static_cast((u.u64 & kDpExponentMask) >> kDpSignificandSize); + uint64_t significand = (u.u64 & kDpSignificandMask); + if (biased_e != 0) { + f = significand + kDpHiddenBit; + e = biased_e - kDpExponentBias; + } + else { + f = significand; + e = kDpMinExponent + 1; + } + } + */ + unsafe fn from(d: $fty) -> Self { + let u: $mask_type = mem::transmute(d); + + let biased_e = ((u & $exponent_mask) >> $significand_size) as $expty; + let significand = u & $significand_mask; + if biased_e != 0 { + DiyFp { + f: significand + $hidden_bit, + e: biased_e - $exponent_bias - $significand_size, + } + } else { + DiyFp { + f: significand, + e: 1 - $exponent_bias - $significand_size, + } + } + } + + // Normalizes so that the highest bit of the diy significand is 1. + /* + DiyFp Normalize() const { + DiyFp res = *this; + while (!(res.f & (static_cast(1) << 63))) { + res.f <<= 1; + res.e--; + } + return res; + } + */ + fn normalize(self) -> DiyFp { + let mut res = self; + while (res.f & (1 << ($diy_significand_size - 1))) == 0 { + res.f <<= 1; + res.e -= 1; + } + res + } + + // Normalizes so that the highest bit of the diy significand is 1. + // + // Precondition: + // `self.f` must be no more than 2 bits longer than the f64 significand. 
+ /* + DiyFp NormalizeBoundary() const { + DiyFp res = *this; + while (!(res.f & (kDpHiddenBit << 1))) { + res.f <<= 1; + res.e--; + } + res.f <<= (kDiySignificandSize - kDpSignificandSize - 2); + res.e = res.e - (kDiySignificandSize - kDpSignificandSize - 2); + return res; + } + */ + fn normalize_boundary(self) -> DiyFp { + let mut res = self; + while (res.f & $hidden_bit << 1) == 0 { + res.f <<= 1; + res.e -= 1; + } + res.f <<= $diy_significand_size - $significand_size - 2; + res.e -= $diy_significand_size - $significand_size - 2; + res + } + + // Normalizes `self - e` and `self + e` where `e` is half of the least + // significant digit of `self`. The plus is normalized so that the highest + // bit of the diy significand is 1. The minus is normalized so that it has + // the same exponent as the plus. + // + // Preconditions: + // `self` must have been returned directly from `DiyFp::from_f64`. + // `self.f` must not be zero. + /* + void NormalizedBoundaries(DiyFp* minus, DiyFp* plus) const { + DiyFp pl = DiyFp((f << 1) + 1, e - 1).NormalizeBoundary(); + DiyFp mi = (f == kDpHiddenBit) ? 
DiyFp((f << 2) - 1, e - 2) : DiyFp((f << 1) - 1, e - 1); + mi.f <<= mi.e - pl.e; + mi.e = pl.e; + *plus = pl; + *minus = mi; + } + */ + fn normalized_boundaries(self) -> (DiyFp, DiyFp) { + let pl = DiyFp::new((self.f << 1) + 1, self.e - 1).normalize_boundary(); + let mut mi = if self.f == $hidden_bit { + DiyFp::new((self.f << 2) - 1, self.e - 2) + } else { + DiyFp::new((self.f << 1) - 1, self.e - 1) + }; + mi.f <<= mi.e - pl.e; + mi.e = pl.e; + (mi, pl) + } +} + +impl ops::Sub for DiyFp { + type Output = Self; + fn sub(self, rhs: Self) -> Self { + DiyFp { + f: self.f - rhs.f, + e: self.e, + } + } +} + +/* +inline DiyFp GetCachedPower(int e, int* K) { + //int k = static_cast(ceil((-61 - e) * 0.30102999566398114)) + 374; + double dk = (-61 - e) * 0.30102999566398114 + 347; // dk must be positive, so can do ceiling in positive + int k = static_cast(dk); + if (dk - k > 0.0) + k++; + + unsigned index = static_cast((k >> 3) + 1); + *K = -(-348 + static_cast(index << 3)); // decimal exponent no need lookup table + + return GetCachedPowerByIndex(index); +} +*/ +#[inline] +fn get_cached_power(e: $expty) -> (DiyFp, isize) { + let dk = (3 - $diy_significand_size - e) as f64 * 0.30102999566398114f64 - ($min_power + 1) as f64; + let mut k = dk as isize; + if dk - k as f64 > 0.0 { + k += 1; + } + + let index = ((k >> 3) + 1) as usize; + let k = -($min_power + (index << 3) as isize); + + (DiyFp::new($cached_powers_f[index], $cached_powers_e[index] as $expty), k) +} + +}} diff -Nru cargo-0.17.0/vendor/dtoa-0.4.1/src/dtoa.rs cargo-0.19.0/vendor/dtoa-0.4.1/src/dtoa.rs --- cargo-0.17.0/vendor/dtoa-0.4.1/src/dtoa.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/dtoa-0.4.1/src/dtoa.rs 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,479 @@ +// Copyright 2016 Dtoa Developers +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +#[macro_export] +macro_rules! dtoa {( + floating_type: $fty:ty, + significand_type: $sigty:ty, + exponent_type: $expty:ty, + $($diyfp_param:ident: $diyfp_value:tt,)* +) => { + +diyfp! { + floating_type: $fty, + significand_type: $sigty, + exponent_type: $expty, + $($diyfp_param: $diyfp_value,)* +}; + +/* +inline void GrisuRound(char* buffer, int len, uint64_t delta, uint64_t rest, uint64_t ten_kappa, uint64_t wp_w) { + while (rest < wp_w && delta - rest >= ten_kappa && + (rest + ten_kappa < wp_w || /// closer + wp_w - rest > rest + ten_kappa - wp_w)) { + buffer[len - 1]--; + rest += ten_kappa; + } +} +*/ + +#[inline] +unsafe fn grisu_round(buffer: *mut u8, len: isize, delta: $sigty, mut rest: $sigty, ten_kappa: $sigty, wp_w: $sigty) { + while rest < wp_w && delta - rest >= ten_kappa && + (rest + ten_kappa < wp_w || // closer + wp_w - rest > rest + ten_kappa - wp_w) { + *buffer.offset(len - 1) -= 1; + rest += ten_kappa; + } +} + +/* +inline unsigned CountDecimalDigit32(uint32_t n) { + // Simple pure C++ implementation was faster than __builtin_clz version in this situation. 
+ if (n < 10) return 1; + if (n < 100) return 2; + if (n < 1000) return 3; + if (n < 10000) return 4; + if (n < 100000) return 5; + if (n < 1000000) return 6; + if (n < 10000000) return 7; + if (n < 100000000) return 8; + // Will not reach 10 digits in DigitGen() + //if (n < 1000000000) return 9; + //return 10; + return 9; +} +*/ + +#[inline] +fn count_decimal_digit32(n: u32) -> usize { + if n < 10 { 1 } + else if n < 100 { 2 } + else if n < 1000 { 3 } + else if n < 10000 { 4 } + else if n < 100000 { 5 } + else if n < 1000000 { 6 } + else if n < 10000000 { 7 } + else if n < 100000000 { 8 } + // Will not reach 10 digits in digit_gen() + else { 9 } +} + +/* +inline void DigitGen(const DiyFp& W, const DiyFp& Mp, uint64_t delta, char* buffer, int* len, int* K) { + static const uint32_t kPow10[] = { 1, 10, 100, 1000, 10000, 100000, 1000000, 10000000, 100000000, 1000000000 }; + const DiyFp one(uint64_t(1) << -Mp.e, Mp.e); + const DiyFp wp_w = Mp - W; + uint32_t p1 = static_cast(Mp.f >> -one.e); + uint64_t p2 = Mp.f & (one.f - 1); + unsigned kappa = CountDecimalDigit32(p1); // kappa in [0, 9] + *len = 0; +*/ + +// Returns length and k. 
+#[inline] +unsafe fn digit_gen(w: DiyFp, mp: DiyFp, mut delta: $sigty, buffer: *mut u8, mut k: isize) -> (isize, isize) { + static POW10: [$sigty; 10] = [ 1, 10, 100, 1000, 10000, 100000, 1000000, 10000000, 100000000, 1000000000 ]; + let one = DiyFp::new(1 << -mp.e, mp.e); + let wp_w = mp - w; + let mut p1 = (mp.f >> -one.e) as u32; + let mut p2 = mp.f & (one.f - 1); + let mut kappa = count_decimal_digit32(p1); // kappa in [0, 9] + let mut len = 0; + + /* + while (kappa > 0) { + uint32_t d = 0; + switch (kappa) { + case 9: d = p1 / 100000000; p1 %= 100000000; break; + case 8: d = p1 / 10000000; p1 %= 10000000; break; + case 7: d = p1 / 1000000; p1 %= 1000000; break; + case 6: d = p1 / 100000; p1 %= 100000; break; + case 5: d = p1 / 10000; p1 %= 10000; break; + case 4: d = p1 / 1000; p1 %= 1000; break; + case 3: d = p1 / 100; p1 %= 100; break; + case 2: d = p1 / 10; p1 %= 10; break; + case 1: d = p1; p1 = 0; break; + default:; + } + if (d || *len) + buffer[(*len)++] = static_cast('0' + static_cast(d)); + kappa--; + uint64_t tmp = (static_cast(p1) << -one.e) + p2; + if (tmp <= delta) { + *K += kappa; + GrisuRound(buffer, *len, delta, tmp, static_cast(kPow10[kappa]) << -one.e, wp_w.f); + return; + } + } + */ + while kappa > 0 { + let mut d = 0u32; + match kappa { + 9 => { d = p1 / 100000000; p1 %= 100000000; } + 8 => { d = p1 / 10000000; p1 %= 10000000; } + 7 => { d = p1 / 1000000; p1 %= 1000000; } + 6 => { d = p1 / 100000; p1 %= 100000; } + 5 => { d = p1 / 10000; p1 %= 10000; } + 4 => { d = p1 / 1000; p1 %= 1000; } + 3 => { d = p1 / 100; p1 %= 100; } + 2 => { d = p1 / 10; p1 %= 10; } + 1 => { d = p1; p1 = 0; } + _ => {} + } + if d != 0 || len != 0 { + *buffer.offset(len) = b'0' + d as u8; + len += 1; + } + kappa -= 1; + let tmp = (p1 as $sigty << -one.e) + p2; + if tmp <= delta { + k += kappa as isize; + grisu_round(buffer, len, delta, tmp, POW10[kappa] << -one.e, wp_w.f); + return (len, k); + } + } + + // kappa = 0 + /* + for (;;) { + p2 *= 10; + delta *= 10; + 
char d = static_cast(p2 >> -one.e); + if (d || *len) + buffer[(*len)++] = static_cast('0' + d); + p2 &= one.f - 1; + kappa--; + if (p2 < delta) { + *K += kappa; + int index = -static_cast(kappa); + GrisuRound(buffer, *len, delta, p2, one.f, wp_w.f * (index < 9 ? kPow10[-static_cast(kappa)] : 0)); + return; + } + } + */ + loop { + p2 *= 10; + delta *= 10; + let d = (p2 >> -one.e) as u8; + if d != 0 || len != 0 { + *buffer.offset(len) = b'0' + d; + len += 1; + } + p2 &= one.f - 1; + kappa = kappa.wrapping_sub(1); + if p2 < delta { + k += kappa as isize; + let index = -(kappa as isize); + grisu_round(buffer, len, delta, p2, one.f, wp_w.f * if index < 9 { POW10[-(kappa as isize) as usize] } else { 0 }); + return (len, k); + } + } +} + +/* +inline void Grisu2(double value, char* buffer, int* length, int* K) { + const DiyFp v(value); + DiyFp w_m, w_p; + v.NormalizedBoundaries(&w_m, &w_p); + + const DiyFp c_mk = GetCachedPower(w_p.e, K); + const DiyFp W = v.Normalize() * c_mk; + DiyFp Wp = w_p * c_mk; + DiyFp Wm = w_m * c_mk; + Wm.f++; + Wp.f--; + DigitGen(W, Wp, Wp.f - Wm.f, buffer, length, K); +} +*/ + +// Returns length and k. 
+#[inline] +unsafe fn grisu2(value: $fty, buffer: *mut u8) -> (isize, isize) { + let v = DiyFp::from(value); + let (w_m, w_p) = v.normalized_boundaries(); + + let (c_mk, k) = get_cached_power(w_p.e); + let w = v.normalize() * c_mk; + let mut wp = w_p * c_mk; + let mut wm = w_m * c_mk; + wm.f += 1; + wp.f -= 1; + digit_gen(w, wp, wp.f - wm.f, buffer, k) +} + +/* +inline char* WriteExponent(int K, char* buffer) { + if (K < 0) { + *buffer++ = '-'; + K = -K; + } + + if (K >= 100) { + *buffer++ = static_cast('0' + static_cast(K / 100)); + K %= 100; + const char* d = GetDigitsLut() + K * 2; + *buffer++ = d[0]; + *buffer++ = d[1]; + } + else if (K >= 10) { + const char* d = GetDigitsLut() + K * 2; + *buffer++ = d[0]; + *buffer++ = d[1]; + } + else + *buffer++ = static_cast('0' + static_cast(K)); + + return buffer; +} +*/ + +#[inline] +unsafe fn write_exponent(mut k: isize, mut buffer: *mut u8) -> *mut u8 { + if k < 0 { + *buffer = b'-'; + buffer = buffer.offset(1); + k = -k; + } + + if k >= 100 { + *buffer = b'0' + (k / 100) as u8; + k %= 100; + let d = DEC_DIGITS_LUT.as_ptr().offset(k * 2); + ptr::copy_nonoverlapping(d, buffer.offset(1), 2); + buffer.offset(3) + } else if k >= 10 { + let d = DEC_DIGITS_LUT.as_ptr().offset(k * 2); + ptr::copy_nonoverlapping(d, buffer, 2); + buffer.offset(2) + } else { + *buffer = b'0' + k as u8; + buffer.offset(1) + } +} + +/* +inline char* Prettify(char* buffer, int length, int k, int maxDecimalPlaces) { + const int kk = length + k; // 10^(kk-1) <= v < 10^kk +*/ + +#[inline] +unsafe fn prettify(buffer: *mut u8, length: isize, k: isize) -> *mut u8 { + let kk = length + k; // 10^(kk-1) <= v < 10^kk + + /* + if (0 <= k && kk <= 21) { + // 1234e7 -> 12340000000 + for (int i = length; i < kk; i++) + buffer[i] = '0'; + buffer[kk] = '.'; + buffer[kk + 1] = '0'; + return &buffer[kk + 2]; + } + */ + if 0 <= k && kk <= 21 { + // 1234e7 -> 12340000000 + for i in length..kk { + *buffer.offset(i) = b'0'; + } + *buffer.offset(kk) = b'.'; + 
*buffer.offset(kk + 1) = b'0'; + buffer.offset(kk + 2) + } + + /* + else if (0 < kk && kk <= 21) { + // 1234e-2 -> 12.34 + std::memmove(&buffer[kk + 1], &buffer[kk], static_cast(length - kk)); + buffer[kk] = '.'; + if (0 > k + maxDecimalPlaces) { + // When maxDecimalPlaces = 2, 1.2345 -> 1.23, 1.102 -> 1.1 + // Remove extra trailing zeros (at least one) after truncation. + for (int i = kk + maxDecimalPlaces; i > kk + 1; i--) + if (buffer[i] != '0') + return &buffer[i + 1]; + return &buffer[kk + 2]; // Reserve one zero + } + else + return &buffer[length + 1]; + } + */ + else if 0 < kk && kk <= 21 { + // 1234e-2 -> 12.34 + ptr::copy(buffer.offset(kk), buffer.offset(kk + 1), (length - kk) as usize); + *buffer.offset(kk) = b'.'; + if 0 > k + MAX_DECIMAL_PLACES { + // When MAX_DECIMAL_PLACES = 2, 1.2345 -> 1.23, 1.102 -> 1.1 + // Remove extra trailing zeros (at least one) after truncation. + for i in (kk + 2 .. kk + MAX_DECIMAL_PLACES + 1).rev() { + if *buffer.offset(i) != b'0' { + return buffer.offset(i + 1); + } + } + buffer.offset(kk + 2) // Reserve one zero + } else { + buffer.offset(length + 1) + } + } + + /* + else if (-6 < kk && kk <= 0) { + // 1234e-6 -> 0.001234 + const int offset = 2 - kk; + std::memmove(&buffer[offset], &buffer[0], static_cast(length)); + buffer[0] = '0'; + buffer[1] = '.'; + for (int i = 2; i < offset; i++) + buffer[i] = '0'; + if (length - kk > maxDecimalPlaces) { + // When maxDecimalPlaces = 2, 0.123 -> 0.12, 0.102 -> 0.1 + // Remove extra trailing zeros (at least one) after truncation. 
+ for (int i = maxDecimalPlaces + 1; i > 2; i--) + if (buffer[i] != '0') + return &buffer[i + 1]; + return &buffer[3]; // Reserve one zero + } + else + return &buffer[length + offset]; + } + */ + else if -6 < kk && kk <= 0 { + // 1234e-6 -> 0.001234 + let offset = 2 - kk; + ptr::copy(buffer, buffer.offset(offset), length as usize); + *buffer = b'0'; + *buffer.offset(1) = b'.'; + for i in 2..offset { + *buffer.offset(i) = b'0'; + } + if length - kk > MAX_DECIMAL_PLACES { + // When MAX_DECIMAL_PLACES = 2, 0.123 -> 0.12, 0.102 -> 0.1 + // Remove extra trailing zeros (at least one) after truncation. + for i in (3 .. MAX_DECIMAL_PLACES + 2).rev() { + if *buffer.offset(i) != b'0' { + return buffer.offset(i + 1); + } + } + buffer.offset(3) // Reserve one zero + } else { + buffer.offset(length + offset) + } + } + + /* + else if (kk < -maxDecimalPlaces) { + // Truncate to zero + buffer[0] = '0'; + buffer[1] = '.'; + buffer[2] = '0'; + return &buffer[3]; + } + */ + else if kk < -MAX_DECIMAL_PLACES { + *buffer = b'0'; + *buffer.offset(1) = b'.'; + *buffer.offset(2) = b'0'; + buffer.offset(3) + } + + /* + else if (length == 1) { + // 1e30 + buffer[1] = 'e'; + return WriteExponent(kk - 1, &buffer[2]); + } + */ + else if length == 1 { + // 1e30 + *buffer.offset(1) = b'e'; + write_exponent(kk - 1, buffer.offset(2)) + } + + /* + else { + // 1234e30 -> 1.234e33 + std::memmove(&buffer[2], &buffer[1], static_cast(length - 1)); + buffer[1] = '.'; + buffer[length + 1] = 'e'; + return WriteExponent(kk - 1, &buffer[0 + length + 2]); + } + */ + else { + // 1234e30 -> 1.234e33 + ptr::copy(buffer.offset(1), buffer.offset(2), (length - 1) as usize); + *buffer.offset(1) = b'.'; + *buffer.offset(length + 1) = b'e'; + write_exponent(kk - 1, buffer.offset(length + 2)) + } +} + +/* +inline char* dtoa(double value, char* buffer, int maxDecimalPlaces = 324) { + RAPIDJSON_ASSERT(maxDecimalPlaces >= 1); + Double d(value); + if (d.IsZero()) { + if (d.Sign()) + *buffer++ = '-'; // -0.0, Issue #289 + 
buffer[0] = '0'; + buffer[1] = '.'; + buffer[2] = '0'; + return &buffer[3]; + } + else { + if (value < 0) { + *buffer++ = '-'; + value = -value; + } + int length, K; + Grisu2(value, buffer, &length, &K); + return Prettify(buffer, length, K, maxDecimalPlaces); + } +} +*/ + +#[inline] +unsafe fn dtoa(mut wr: W, mut value: $fty) -> io::Result { + if value == 0.0 { + if value.is_sign_negative() { + try!(wr.write_all(b"-0.0")); + Ok(4) + } else { + try!(wr.write_all(b"0.0")); + Ok(3) + } + } else { + let negative = value < 0.0; + if negative { + try!(wr.write_all(b"-")); + value = -value; + } + let mut buffer: [u8; 24] = mem::uninitialized(); + let buf_ptr = buffer.as_mut_ptr(); + let (length, k) = grisu2(value, buf_ptr); + let end = prettify(buf_ptr, length, k); + let len = end as usize - buf_ptr as usize; + try!(wr.write_all(slice::from_raw_parts(buf_ptr, len))); + if negative { + Ok(len + 1) + } else { + Ok(len) + } + } +} + +}} diff -Nru cargo-0.17.0/vendor/dtoa-0.4.1/src/lib.rs cargo-0.19.0/vendor/dtoa-0.4.1/src/lib.rs --- cargo-0.17.0/vendor/dtoa-0.4.1/src/lib.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/dtoa-0.4.1/src/lib.rs 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,146 @@ +// Copyright 2016 Dtoa Developers +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +#[macro_use] mod diyfp; +#[macro_use] mod dtoa; + +use std::{io, mem, ops, ptr, slice}; + +#[inline] +pub fn write(wr: W, value: V) -> io::Result { + value.write(wr) +} + +pub trait Floating { + fn write(self, W) -> io::Result; +} + +impl Floating for f32 { + fn write(self, wr: W) -> io::Result { + dtoa! 
{ + floating_type: f32, + significand_type: u32, + exponent_type: i32, + + diy_significand_size: 32, + significand_size: 23, + exponent_bias: 0x7F, + mask_type: u32, + exponent_mask: 0x7F800000, + significand_mask: 0x007FFFFF, + hidden_bit: 0x00800000, + cached_powers_f: CACHED_POWERS_F_32, + cached_powers_e: CACHED_POWERS_E_32, + min_power: (-36), + }; + unsafe { dtoa(wr, self) } + } +} + +impl Floating for f64 { + fn write(self, wr: W) -> io::Result { + dtoa! { + floating_type: f64, + significand_type: u64, + exponent_type: isize, + + diy_significand_size: 64, + significand_size: 52, + exponent_bias: 0x3FF, + mask_type: u64, + exponent_mask: 0x7FF0000000000000, + significand_mask: 0x000FFFFFFFFFFFFF, + hidden_bit: 0x0010000000000000, + cached_powers_f: CACHED_POWERS_F_64, + cached_powers_e: CACHED_POWERS_E_64, + min_power: (-348), + }; + unsafe { dtoa(wr, self) } + } +} + +//////////////////////////////////////////////////////////////////////////////// + +const MAX_DECIMAL_PLACES: isize = 324; + +static DEC_DIGITS_LUT: &'static [u8] = + b"0001020304050607080910111213141516171819\ + 2021222324252627282930313233343536373839\ + 4041424344454647484950515253545556575859\ + 6061626364656667686970717273747576777879\ + 8081828384858687888990919293949596979899"; + +// 10^-36, 10^-28, ..., 10^52 +static CACHED_POWERS_F_32: [u32; 12] = [ + 0xaa242499, 0xfd87b5f3, 0xbce50865, 0x8cbccc09, + 0xd1b71759, 0x9c400000, 0xe8d4a510, 0xad78ebc6, + 0x813f3979, 0xc097ce7c, 0x8f7e32ce, 0xd5d238a5, +]; + +static CACHED_POWERS_E_32: [i16; 12] = [ + -151, -125, -98, -71, -45, -18, 8, 35, 62, 88, 115, 141, +]; + +// 10^-348, 10^-340, ..., 10^340 +static CACHED_POWERS_F_64: [u64; 87] = [ + 0xfa8fd5a0081c0288, 0xbaaee17fa23ebf76, + 0x8b16fb203055ac76, 0xcf42894a5dce35ea, + 0x9a6bb0aa55653b2d, 0xe61acf033d1a45df, + 0xab70fe17c79ac6ca, 0xff77b1fcbebcdc4f, + 0xbe5691ef416bd60c, 0x8dd01fad907ffc3c, + 0xd3515c2831559a83, 0x9d71ac8fada6c9b5, + 0xea9c227723ee8bcb, 0xaecc49914078536d, + 
0x823c12795db6ce57, 0xc21094364dfb5637, + 0x9096ea6f3848984f, 0xd77485cb25823ac7, + 0xa086cfcd97bf97f4, 0xef340a98172aace5, + 0xb23867fb2a35b28e, 0x84c8d4dfd2c63f3b, + 0xc5dd44271ad3cdba, 0x936b9fcebb25c996, + 0xdbac6c247d62a584, 0xa3ab66580d5fdaf6, + 0xf3e2f893dec3f126, 0xb5b5ada8aaff80b8, + 0x87625f056c7c4a8b, 0xc9bcff6034c13053, + 0x964e858c91ba2655, 0xdff9772470297ebd, + 0xa6dfbd9fb8e5b88f, 0xf8a95fcf88747d94, + 0xb94470938fa89bcf, 0x8a08f0f8bf0f156b, + 0xcdb02555653131b6, 0x993fe2c6d07b7fac, + 0xe45c10c42a2b3b06, 0xaa242499697392d3, + 0xfd87b5f28300ca0e, 0xbce5086492111aeb, + 0x8cbccc096f5088cc, 0xd1b71758e219652c, + 0x9c40000000000000, 0xe8d4a51000000000, + 0xad78ebc5ac620000, 0x813f3978f8940984, + 0xc097ce7bc90715b3, 0x8f7e32ce7bea5c70, + 0xd5d238a4abe98068, 0x9f4f2726179a2245, + 0xed63a231d4c4fb27, 0xb0de65388cc8ada8, + 0x83c7088e1aab65db, 0xc45d1df942711d9a, + 0x924d692ca61be758, 0xda01ee641a708dea, + 0xa26da3999aef774a, 0xf209787bb47d6b85, + 0xb454e4a179dd1877, 0x865b86925b9bc5c2, + 0xc83553c5c8965d3d, 0x952ab45cfa97a0b3, + 0xde469fbd99a05fe3, 0xa59bc234db398c25, + 0xf6c69a72a3989f5c, 0xb7dcbf5354e9bece, + 0x88fcf317f22241e2, 0xcc20ce9bd35c78a5, + 0x98165af37b2153df, 0xe2a0b5dc971f303a, + 0xa8d9d1535ce3b396, 0xfb9b7cd9a4a7443c, + 0xbb764c4ca7a44410, 0x8bab8eefb6409c1a, + 0xd01fef10a657842c, 0x9b10a4e5e9913129, + 0xe7109bfba19c0c9d, 0xac2820d9623bf429, + 0x80444b5e7aa7cf85, 0xbf21e44003acdd2d, + 0x8e679c2f5e44ff8f, 0xd433179d9c8cb841, + 0x9e19db92b4e31ba9, 0xeb96bf6ebadf77d9, + 0xaf87023b9bf0ee6b, +]; +static CACHED_POWERS_E_64: [i16; 87] = [ + -1220, -1193, -1166, -1140, -1113, -1087, -1060, -1034, -1007, -980, + -954, -927, -901, -874, -847, -821, -794, -768, -741, -715, + -688, -661, -635, -608, -582, -555, -529, -502, -475, -449, + -422, -396, -369, -343, -316, -289, -263, -236, -210, -183, + -157, -130, -103, -77, -50, -24, 3, 30, 56, 83, + 109, 136, 162, 189, 216, 242, 269, 295, 322, 348, + 375, 402, 428, 455, 481, 508, 534, 561, 588, 614, + 641, 
667, 694, 720, 747, 774, 800, 827, 853, 880, + 907, 933, 960, 986, 1013, 1039, 1066, +]; diff -Nru cargo-0.17.0/vendor/dtoa-0.4.1/tests/test.rs cargo-0.19.0/vendor/dtoa-0.4.1/tests/test.rs --- cargo-0.17.0/vendor/dtoa-0.4.1/tests/test.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/dtoa-0.4.1/tests/test.rs 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,38 @@ +extern crate dtoa; + +use std::str; + +#[test] +fn test_f64() { + test_write(1.234e20f64, "123400000000000000000.0"); + test_write(1.234e21f64, "1.234e21"); + test_write(2.71828f64, "2.71828"); + test_write(0.0f64, "0.0"); + test_write(-0.0f64, "-0.0"); + test_write(1.1e128f64, "1.1e128"); + test_write(1.1e-64f64, "1.1e-64"); + test_write(2.718281828459045f64, "2.718281828459045"); + test_write(5e-324f64, "5e-324"); + test_write(::std::f64::MAX, "1.7976931348623157e308"); +} + +#[test] +fn test_f32() { + test_write(1.234e20f32, "123400000000000000000.0"); + test_write(1.234e21f32, "1.234e21"); + test_write(2.71828f32, "2.71828"); + test_write(0.0f32, "0.0"); + test_write(-0.0f32, "-0.0"); + test_write(1.1e32f32, "1.1e32"); + test_write(1.1e-32f32, "1.1e-32"); + test_write(2.7182817f32, "2.7182817"); + test_write(1e-45f32, "1e-45"); + test_write(::std::f32::MAX, "3.4028235e38"); +} + +fn test_write(value: F, expected: &'static str) { + let mut buf = [b'\0'; 30]; + let len = dtoa::write(&mut buf[..], value).unwrap(); + let result = str::from_utf8(&buf[..len]).unwrap(); + assert_eq!(result, expected.to_string()); +} diff -Nru cargo-0.17.0/vendor/dtoa-0.4.1/.travis.yml cargo-0.19.0/vendor/dtoa-0.4.1/.travis.yml --- cargo-0.17.0/vendor/dtoa-0.4.1/.travis.yml 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/dtoa-0.4.1/.travis.yml 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,6 @@ +sudo: false + +language: rust + +rust: + - nightly diff -Nru cargo-0.17.0/vendor/env_logger-0.3.5/.cargo-checksum.json cargo-0.19.0/vendor/env_logger-0.3.5/.cargo-checksum.json --- 
cargo-0.17.0/vendor/env_logger-0.3.5/.cargo-checksum.json 2017-03-24 16:59:55.000000000 +0000 +++ cargo-0.19.0/vendor/env_logger-0.3.5/.cargo-checksum.json 1970-01-01 00:00:00.000000000 +0000 @@ -1 +0,0 @@ -{"files":{".cargo-ok":"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855","Cargo.toml":"4af0565a97a599bba727315d9aff1f57a350dcfee7d9f00986c851e54a24b4ca","src/lib.rs":"484cec14a5f18a25b71d7b1842f7b184f0530165021b71b36dde9fc57b7fc15a","src/regex.rs":"d8e2a6958d4ed8084867063aae4b5c77ffc5d271dc2e17909d56c5a5e1552034","src/string.rs":"26ede9ab41a2673c3ad6001bc1802c005ce9a4f190f55860a24aa66b6b71bbc7","tests/regexp_filter.rs":"a3f9c01623e90e54b247a62c53b25caf5f502d054f28c0bdf92abbea486a95b5"},"package":"15abd780e45b3ea4f76b4e9a26ff4843258dd8a3eed2775a0e7368c2e7936c2f"} \ No newline at end of file diff -Nru cargo-0.17.0/vendor/env_logger-0.3.5/Cargo.toml cargo-0.19.0/vendor/env_logger-0.3.5/Cargo.toml --- cargo-0.17.0/vendor/env_logger-0.3.5/Cargo.toml 2017-03-24 16:59:55.000000000 +0000 +++ cargo-0.19.0/vendor/env_logger-0.3.5/Cargo.toml 1970-01-01 00:00:00.000000000 +0000 @@ -1,23 +0,0 @@ -[package] -name = "env_logger" -version = "0.3.5" -authors = ["The Rust Project Developers"] -license = "MIT/Apache-2.0" -repository = "https://github.com/rust-lang/log" -documentation = "http://doc.rust-lang.org/log/env_logger" -homepage = "https://github.com/rust-lang/log" -description = """ -An logging implementation for `log` which is configured via an environment -variable. -""" - -[dependencies] -log = { version = "0.3", path = ".." 
} -regex = { version = "0.1", optional = true } - -[[test]] -name = "regexp_filter" -harness = false - -[features] -default = ["regex"] diff -Nru cargo-0.17.0/vendor/env_logger-0.3.5/src/lib.rs cargo-0.19.0/vendor/env_logger-0.3.5/src/lib.rs --- cargo-0.17.0/vendor/env_logger-0.3.5/src/lib.rs 2017-03-24 16:59:55.000000000 +0000 +++ cargo-0.19.0/vendor/env_logger-0.3.5/src/lib.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,623 +0,0 @@ -// Copyright 2014-2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -//! A logger configured via an environment variable which writes to standard -//! error. -//! -//! ## Example -//! -//! ``` -//! #[macro_use] extern crate log; -//! extern crate env_logger; -//! -//! use log::LogLevel; -//! -//! fn main() { -//! env_logger::init().unwrap(); -//! -//! debug!("this is a debug {}", "message"); -//! error!("this is printed by default"); -//! -//! if log_enabled!(LogLevel::Info) { -//! let x = 3 * 4; // expensive computation -//! info!("the answer was: {}", x); -//! } -//! } -//! ``` -//! -//! Assumes the binary is `main`: -//! -//! ```{.bash} -//! $ RUST_LOG=error ./main -//! ERROR:main: this is printed by default -//! ``` -//! -//! ```{.bash} -//! $ RUST_LOG=info ./main -//! ERROR:main: this is printed by default -//! INFO:main: the answer was: 12 -//! ``` -//! -//! ```{.bash} -//! $ RUST_LOG=debug ./main -//! DEBUG:main: this is a debug message -//! ERROR:main: this is printed by default -//! INFO:main: the answer was: 12 -//! ``` -//! -//! You can also set the log level on a per module basis: -//! -//! ```{.bash} -//! $ RUST_LOG=main=info ./main -//! ERROR:main: this is printed by default -//! INFO:main: the answer was: 12 -//! ``` -//! -//! 
And enable all logging: -//! -//! ```{.bash} -//! $ RUST_LOG=main ./main -//! DEBUG:main: this is a debug message -//! ERROR:main: this is printed by default -//! INFO:main: the answer was: 12 -//! ``` -//! -//! See the documentation for the log crate for more information about its API. -//! -//! ## Enabling logging -//! -//! Log levels are controlled on a per-module basis, and by default all logging -//! is disabled except for `error!`. Logging is controlled via the `RUST_LOG` -//! environment variable. The value of this environment variable is a -//! comma-separated list of logging directives. A logging directive is of the -//! form: -//! -//! ```text -//! path::to::module=log_level -//! ``` -//! -//! The path to the module is rooted in the name of the crate it was compiled -//! for, so if your program is contained in a file `hello.rs`, for example, to -//! turn on logging for this file you would use a value of `RUST_LOG=hello`. -//! Furthermore, this path is a prefix-search, so all modules nested in the -//! specified module will also have logging enabled. -//! -//! The actual `log_level` is optional to specify. If omitted, all logging will -//! be enabled. If specified, it must be one of the strings `debug`, `error`, -//! `info`, `warn`, or `trace`. -//! -//! As the log level for a module is optional, the module to enable logging for -//! is also optional. If only a `log_level` is provided, then the global log -//! level for all modules is set to this value. -//! -//! Some examples of valid values of `RUST_LOG` are: -//! -//! * `hello` turns on all logging for the 'hello' module -//! * `info` turns on all info logging -//! * `hello=debug` turns on debug logging for 'hello' -//! * `hello,std::option` turns on hello, and std's option logging -//! * `error,hello=warn` turn on global error logging and also warn for hello -//! -//! ## Filtering results -//! -//! A RUST_LOG directive may include a regex filter. The syntax is to append `/` -//! followed by a regex. 
Each message is checked against the regex, and is only -//! logged if it matches. Note that the matching is done after formatting the -//! log string but before adding any logging meta-data. There is a single filter -//! for all modules. -//! -//! Some examples: -//! -//! * `hello/foo` turns on all logging for the 'hello' module where the log -//! message includes 'foo'. -//! * `info/f.o` turns on all info logging where the log message includes 'foo', -//! 'f1o', 'fao', etc. -//! * `hello=debug/foo*foo` turns on debug logging for 'hello' where the log -//! message includes 'foofoo' or 'fofoo' or 'fooooooofoo', etc. -//! * `error,hello=warn/[0-9] scopes` turn on global error logging and also -//! warn for hello. In both cases the log message must include a single digit -//! number followed by 'scopes'. - -#![doc(html_logo_url = "http://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png", - html_favicon_url = "http://www.rust-lang.org/favicon.ico", - html_root_url = "http://doc.rust-lang.org/env_logger/")] -#![cfg_attr(test, deny(warnings))] - -extern crate log; - -use std::env; -use std::io::prelude::*; -use std::io; -use std::mem; - -use log::{Log, LogLevel, LogLevelFilter, LogRecord, SetLoggerError, LogMetadata}; - -#[cfg(feature = "regex")] -#[path = "regex.rs"] -mod filter; - -#[cfg(not(feature = "regex"))] -#[path = "string.rs"] -mod filter; - -/// The logger. -pub struct Logger { - directives: Vec, - filter: Option, - format: Box String + Sync + Send>, -} - -/// LogBuilder acts as builder for initializing the Logger. -/// It can be used to customize the log format, change the enviromental variable used -/// to provide the logging directives and also set the default log level filter. 
-/// -/// ## Example -/// -/// ``` -/// #[macro_use] -/// extern crate log; -/// extern crate env_logger; -/// -/// use std::env; -/// use log::{LogRecord, LogLevelFilter}; -/// use env_logger::LogBuilder; -/// -/// fn main() { -/// let format = |record: &LogRecord| { -/// format!("{} - {}", record.level(), record.args()) -/// }; -/// -/// let mut builder = LogBuilder::new(); -/// builder.format(format).filter(None, LogLevelFilter::Info); -/// -/// if env::var("RUST_LOG").is_ok() { -/// builder.parse(&env::var("RUST_LOG").unwrap()); -/// } -/// -/// builder.init().unwrap(); -/// -/// error!("error message"); -/// info!("info message"); -/// } -/// ``` -pub struct LogBuilder { - directives: Vec, - filter: Option, - format: Box String + Sync + Send>, -} - -impl LogBuilder { - /// Initializes the log builder with defaults - pub fn new() -> LogBuilder { - LogBuilder { - directives: Vec::new(), - filter: None, - format: Box::new(|record: &LogRecord| { - format!("{}:{}: {}", record.level(), - record.location().module_path(), record.args()) - }), - } - } - - /// Adds filters to the logger - /// - /// The given module (if any) will log at most the specified level provided. - /// If no module is provided then the filter will apply to all log messages. - pub fn filter(&mut self, - module: Option<&str>, - level: LogLevelFilter) -> &mut Self { - self.directives.push(LogDirective { - name: module.map(|s| s.to_string()), - level: level, - }); - self - } - - /// Sets the format function for formatting the log output. - /// - /// This function is called on each record logged to produce a string which - /// is actually printed out. - pub fn format(&mut self, format: F) -> &mut Self - where F: Fn(&LogRecord) -> String + Sync + Send - { - self.format = Box::new(format); - self - } - - /// Parses the directives string in the same form as the RUST_LOG - /// environment variable. - /// - /// See the module documentation for more details. 
- pub fn parse(&mut self, filters: &str) -> &mut Self { - let (directives, filter) = parse_logging_spec(filters); - - self.filter = filter; - - for directive in directives { - self.directives.push(directive); - } - self - } - - /// Initializes the global logger with an env logger. - /// - /// This should be called early in the execution of a Rust program, and the - /// global logger may only be initialized once. Future initialization - /// attempts will return an error. - pub fn init(&mut self) -> Result<(), SetLoggerError> { - log::set_logger(|max_level| { - let logger = self.build(); - max_level.set(logger.filter()); - Box::new(logger) - }) - } - - /// Build an env logger. - pub fn build(&mut self) -> Logger { - if self.directives.is_empty() { - // Adds the default filter if none exist - self.directives.push(LogDirective { - name: None, - level: LogLevelFilter::Error, - }); - } else { - // Sort the directives by length of their name, this allows a - // little more efficient lookup at runtime. - self.directives.sort_by(|a, b| { - let alen = a.name.as_ref().map(|a| a.len()).unwrap_or(0); - let blen = b.name.as_ref().map(|b| b.len()).unwrap_or(0); - alen.cmp(&blen) - }); - } - - Logger { - directives: mem::replace(&mut self.directives, Vec::new()), - filter: mem::replace(&mut self.filter, None), - format: mem::replace(&mut self.format, Box::new(|_| String::new())), - } - } -} - -impl Logger { - pub fn new() -> Logger { - let mut builder = LogBuilder::new(); - - if let Ok(s) = env::var("RUST_LOG") { - builder.parse(&s); - } - - builder.build() - } - - pub fn filter(&self) -> LogLevelFilter { - self.directives.iter() - .map(|d| d.level).max() - .unwrap_or(LogLevelFilter::Off) - } - - fn enabled(&self, level: LogLevel, target: &str) -> bool { - // Search for the longest match, the vector is assumed to be pre-sorted. - for directive in self.directives.iter().rev() { - match directive.name { - Some(ref name) if !target.starts_with(&**name) => {}, - Some(..) 
| None => { - return level <= directive.level - } - } - } - false - } -} - -impl Log for Logger { - fn enabled(&self, metadata: &LogMetadata) -> bool { - self.enabled(metadata.level(), metadata.target()) - } - - fn log(&self, record: &LogRecord) { - if !Log::enabled(self, record.metadata()) { - return; - } - - if let Some(filter) = self.filter.as_ref() { - if !filter.is_match(&*record.args().to_string()) { - return; - } - } - - let _ = writeln!(&mut io::stderr(), "{}", (self.format)(record)); - } -} - -struct LogDirective { - name: Option, - level: LogLevelFilter, -} - -/// Initializes the global logger with an env logger. -/// -/// This should be called early in the execution of a Rust program, and the -/// global logger may only be initialized once. Future initialization attempts -/// will return an error. -pub fn init() -> Result<(), SetLoggerError> { - let mut builder = LogBuilder::new(); - - if let Ok(s) = env::var("RUST_LOG") { - builder.parse(&s); - } - - builder.init() -} - -/// Parse a logging specification string (e.g: "crate1,crate2::mod3,crate3::x=error/foo") -/// and return a vector with log directives. 
-fn parse_logging_spec(spec: &str) -> (Vec, Option) { - let mut dirs = Vec::new(); - - let mut parts = spec.split('/'); - let mods = parts.next(); - let filter = parts.next(); - if parts.next().is_some() { - println!("warning: invalid logging spec '{}', \ - ignoring it (too many '/'s)", spec); - return (dirs, None); - } - mods.map(|m| { for s in m.split(',') { - if s.len() == 0 { continue } - let mut parts = s.split('='); - let (log_level, name) = match (parts.next(), parts.next().map(|s| s.trim()), parts.next()) { - (Some(part0), None, None) => { - // if the single argument is a log-level string or number, - // treat that as a global fallback - match part0.parse() { - Ok(num) => (num, None), - Err(_) => (LogLevelFilter::max(), Some(part0)), - } - } - (Some(part0), Some(""), None) => (LogLevelFilter::max(), Some(part0)), - (Some(part0), Some(part1), None) => { - match part1.parse() { - Ok(num) => (num, Some(part0)), - _ => { - println!("warning: invalid logging spec '{}', \ - ignoring it", part1); - continue - } - } - }, - _ => { - println!("warning: invalid logging spec '{}', \ - ignoring it", s); - continue - } - }; - dirs.push(LogDirective { - name: name.map(|s| s.to_string()), - level: log_level, - }); - }}); - - let filter = filter.map_or(None, |filter| { - match filter::Filter::new(filter) { - Ok(re) => Some(re), - Err(e) => { - println!("warning: invalid regex filter - {}", e); - None - } - } - }); - - return (dirs, filter); -} - -#[cfg(test)] -mod tests { - use log::{LogLevel, LogLevelFilter}; - - use super::{LogBuilder, Logger, LogDirective, parse_logging_spec}; - - fn make_logger(dirs: Vec) -> Logger { - let mut logger = LogBuilder::new().build(); - logger.directives = dirs; - logger - } - - #[test] - fn filter_info() { - let logger = LogBuilder::new().filter(None, LogLevelFilter::Info).build(); - assert!(logger.enabled(LogLevel::Info, "crate1")); - assert!(!logger.enabled(LogLevel::Debug, "crate1")); - } - - #[test] - fn filter_beginning_longest_match() 
{ - let logger = LogBuilder::new() - .filter(Some("crate2"), LogLevelFilter::Info) - .filter(Some("crate2::mod"), LogLevelFilter::Debug) - .filter(Some("crate1::mod1"), LogLevelFilter::Warn) - .build(); - assert!(logger.enabled(LogLevel::Debug, "crate2::mod1")); - assert!(!logger.enabled(LogLevel::Debug, "crate2")); - } - - #[test] - fn parse_default() { - let logger = LogBuilder::new().parse("info,crate1::mod1=warn").build(); - assert!(logger.enabled(LogLevel::Warn, "crate1::mod1")); - assert!(logger.enabled(LogLevel::Info, "crate2::mod2")); - } - - #[test] - fn match_full_path() { - let logger = make_logger(vec![ - LogDirective { - name: Some("crate2".to_string()), - level: LogLevelFilter::Info - }, - LogDirective { - name: Some("crate1::mod1".to_string()), - level: LogLevelFilter::Warn - } - ]); - assert!(logger.enabled(LogLevel::Warn, "crate1::mod1")); - assert!(!logger.enabled(LogLevel::Info, "crate1::mod1")); - assert!(logger.enabled(LogLevel::Info, "crate2")); - assert!(!logger.enabled(LogLevel::Debug, "crate2")); - } - - #[test] - fn no_match() { - let logger = make_logger(vec![ - LogDirective { name: Some("crate2".to_string()), level: LogLevelFilter::Info }, - LogDirective { name: Some("crate1::mod1".to_string()), level: LogLevelFilter::Warn } - ]); - assert!(!logger.enabled(LogLevel::Warn, "crate3")); - } - - #[test] - fn match_beginning() { - let logger = make_logger(vec![ - LogDirective { name: Some("crate2".to_string()), level: LogLevelFilter::Info }, - LogDirective { name: Some("crate1::mod1".to_string()), level: LogLevelFilter::Warn } - ]); - assert!(logger.enabled(LogLevel::Info, "crate2::mod1")); - } - - #[test] - fn match_beginning_longest_match() { - let logger = make_logger(vec![ - LogDirective { name: Some("crate2".to_string()), level: LogLevelFilter::Info }, - LogDirective { name: Some("crate2::mod".to_string()), level: LogLevelFilter::Debug }, - LogDirective { name: Some("crate1::mod1".to_string()), level: LogLevelFilter::Warn } - ]); - 
assert!(logger.enabled(LogLevel::Debug, "crate2::mod1")); - assert!(!logger.enabled(LogLevel::Debug, "crate2")); - } - - #[test] - fn match_default() { - let logger = make_logger(vec![ - LogDirective { name: None, level: LogLevelFilter::Info }, - LogDirective { name: Some("crate1::mod1".to_string()), level: LogLevelFilter::Warn } - ]); - assert!(logger.enabled(LogLevel::Warn, "crate1::mod1")); - assert!(logger.enabled(LogLevel::Info, "crate2::mod2")); - } - - #[test] - fn zero_level() { - let logger = make_logger(vec![ - LogDirective { name: None, level: LogLevelFilter::Info }, - LogDirective { name: Some("crate1::mod1".to_string()), level: LogLevelFilter::Off } - ]); - assert!(!logger.enabled(LogLevel::Error, "crate1::mod1")); - assert!(logger.enabled(LogLevel::Info, "crate2::mod2")); - } - - #[test] - fn parse_logging_spec_valid() { - let (dirs, filter) = parse_logging_spec("crate1::mod1=error,crate1::mod2,crate2=debug"); - assert_eq!(dirs.len(), 3); - assert_eq!(dirs[0].name, Some("crate1::mod1".to_string())); - assert_eq!(dirs[0].level, LogLevelFilter::Error); - - assert_eq!(dirs[1].name, Some("crate1::mod2".to_string())); - assert_eq!(dirs[1].level, LogLevelFilter::max()); - - assert_eq!(dirs[2].name, Some("crate2".to_string())); - assert_eq!(dirs[2].level, LogLevelFilter::Debug); - assert!(filter.is_none()); - } - - #[test] - fn parse_logging_spec_invalid_crate() { - // test parse_logging_spec with multiple = in specification - let (dirs, filter) = parse_logging_spec("crate1::mod1=warn=info,crate2=debug"); - assert_eq!(dirs.len(), 1); - assert_eq!(dirs[0].name, Some("crate2".to_string())); - assert_eq!(dirs[0].level, LogLevelFilter::Debug); - assert!(filter.is_none()); - } - - #[test] - fn parse_logging_spec_invalid_log_level() { - // test parse_logging_spec with 'noNumber' as log level - let (dirs, filter) = parse_logging_spec("crate1::mod1=noNumber,crate2=debug"); - assert_eq!(dirs.len(), 1); - assert_eq!(dirs[0].name, Some("crate2".to_string())); - 
assert_eq!(dirs[0].level, LogLevelFilter::Debug); - assert!(filter.is_none()); - } - - #[test] - fn parse_logging_spec_string_log_level() { - // test parse_logging_spec with 'warn' as log level - let (dirs, filter) = parse_logging_spec("crate1::mod1=wrong,crate2=warn"); - assert_eq!(dirs.len(), 1); - assert_eq!(dirs[0].name, Some("crate2".to_string())); - assert_eq!(dirs[0].level, LogLevelFilter::Warn); - assert!(filter.is_none()); - } - - #[test] - fn parse_logging_spec_empty_log_level() { - // test parse_logging_spec with '' as log level - let (dirs, filter) = parse_logging_spec("crate1::mod1=wrong,crate2="); - assert_eq!(dirs.len(), 1); - assert_eq!(dirs[0].name, Some("crate2".to_string())); - assert_eq!(dirs[0].level, LogLevelFilter::max()); - assert!(filter.is_none()); - } - - #[test] - fn parse_logging_spec_global() { - // test parse_logging_spec with no crate - let (dirs, filter) = parse_logging_spec("warn,crate2=debug"); - assert_eq!(dirs.len(), 2); - assert_eq!(dirs[0].name, None); - assert_eq!(dirs[0].level, LogLevelFilter::Warn); - assert_eq!(dirs[1].name, Some("crate2".to_string())); - assert_eq!(dirs[1].level, LogLevelFilter::Debug); - assert!(filter.is_none()); - } - - #[test] - fn parse_logging_spec_valid_filter() { - let (dirs, filter) = parse_logging_spec("crate1::mod1=error,crate1::mod2,crate2=debug/abc"); - assert_eq!(dirs.len(), 3); - assert_eq!(dirs[0].name, Some("crate1::mod1".to_string())); - assert_eq!(dirs[0].level, LogLevelFilter::Error); - - assert_eq!(dirs[1].name, Some("crate1::mod2".to_string())); - assert_eq!(dirs[1].level, LogLevelFilter::max()); - - assert_eq!(dirs[2].name, Some("crate2".to_string())); - assert_eq!(dirs[2].level, LogLevelFilter::Debug); - assert!(filter.is_some() && filter.unwrap().to_string() == "abc"); - } - - #[test] - fn parse_logging_spec_invalid_crate_filter() { - let (dirs, filter) = parse_logging_spec("crate1::mod1=error=warn,crate2=debug/a.c"); - assert_eq!(dirs.len(), 1); - assert_eq!(dirs[0].name, 
Some("crate2".to_string())); - assert_eq!(dirs[0].level, LogLevelFilter::Debug); - assert!(filter.is_some() && filter.unwrap().to_string() == "a.c"); - } - - #[test] - fn parse_logging_spec_empty_with_filter() { - let (dirs, filter) = parse_logging_spec("crate1/a*c"); - assert_eq!(dirs.len(), 1); - assert_eq!(dirs[0].name, Some("crate1".to_string())); - assert_eq!(dirs[0].level, LogLevelFilter::max()); - assert!(filter.is_some() && filter.unwrap().to_string() == "a*c"); - } -} diff -Nru cargo-0.17.0/vendor/env_logger-0.3.5/src/regex.rs cargo-0.19.0/vendor/env_logger-0.3.5/src/regex.rs --- cargo-0.17.0/vendor/env_logger-0.3.5/src/regex.rs 2017-03-24 16:59:55.000000000 +0000 +++ cargo-0.19.0/vendor/env_logger-0.3.5/src/regex.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,28 +0,0 @@ -extern crate regex; - -use std::fmt; - -use self::regex::Regex; - -pub struct Filter { - inner: Regex, -} - -impl Filter { - pub fn new(spec: &str) -> Result { - match Regex::new(spec){ - Ok(r) => Ok(Filter { inner: r }), - Err(e) => Err(e.to_string()), - } - } - - pub fn is_match(&self, s: &str) -> bool { - self.inner.is_match(s) - } -} - -impl fmt::Display for Filter { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - self.inner.fmt(f) - } -} diff -Nru cargo-0.17.0/vendor/env_logger-0.3.5/src/string.rs cargo-0.19.0/vendor/env_logger-0.3.5/src/string.rs --- cargo-0.17.0/vendor/env_logger-0.3.5/src/string.rs 2017-03-24 16:59:55.000000000 +0000 +++ cargo-0.19.0/vendor/env_logger-0.3.5/src/string.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,21 +0,0 @@ -use std::fmt; - -pub struct Filter { - inner: String, -} - -impl Filter { - pub fn new(spec: &str) -> Result { - Ok(Filter { inner: spec.to_string() }) - } - - pub fn is_match(&self, s: &str) -> bool { - s.contains(&self.inner) - } -} - -impl fmt::Display for Filter { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - self.inner.fmt(f) - } -} diff -Nru cargo-0.17.0/vendor/env_logger-0.3.5/tests/regexp_filter.rs 
cargo-0.19.0/vendor/env_logger-0.3.5/tests/regexp_filter.rs --- cargo-0.17.0/vendor/env_logger-0.3.5/tests/regexp_filter.rs 2017-03-24 16:59:55.000000000 +0000 +++ cargo-0.19.0/vendor/env_logger-0.3.5/tests/regexp_filter.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,51 +0,0 @@ -#[macro_use] extern crate log; -extern crate env_logger; - -use std::process; -use std::env; -use std::str; - -fn main() { - if env::var("LOG_REGEXP_TEST").ok() == Some(String::from("1")) { - child_main(); - } else { - parent_main() - } -} - -fn child_main() { - env_logger::init().unwrap(); - info!("XYZ Message"); -} - -fn run_child(rust_log: String) -> bool { - let exe = env::current_exe().unwrap(); - let out = process::Command::new(exe) - .env("LOG_REGEXP_TEST", "1") - .env("RUST_LOG", rust_log) - .output() - .unwrap_or_else(|e| panic!("Unable to start child process: {}", e)); - str::from_utf8(out.stderr.as_ref()).unwrap().contains("XYZ Message") -} - -fn assert_message_printed(rust_log: &str) { - if !run_child(rust_log.to_string()) { - panic!("RUST_LOG={} should allow the test log message", rust_log) - } -} - -fn assert_message_not_printed(rust_log: &str) { - if run_child(rust_log.to_string()) { - panic!("RUST_LOG={} should not allow the test log message", rust_log) - } -} - -fn parent_main() { - // test normal log severity levels - assert_message_printed("info"); - assert_message_not_printed("warn"); - - // test of regular expression filters - assert_message_printed("info/XYZ"); - assert_message_not_printed("info/XXX"); -} diff -Nru cargo-0.17.0/vendor/env_logger-0.4.2/.cargo-checksum.json cargo-0.19.0/vendor/env_logger-0.4.2/.cargo-checksum.json --- cargo-0.17.0/vendor/env_logger-0.4.2/.cargo-checksum.json 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/env_logger-0.4.2/.cargo-checksum.json 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1 @@ 
+{"files":{".cargo-ok":"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855","Cargo.toml":"e95e32beee7a6fe5a403ce27e1e2b72977c00ea16599fc299650a6ebfe2a4e96","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"6485b8ed310d3f0340bf1ad1f47645069ce4069dcc6bb46c7d5c6faf41de1fdb","src/lib.rs":"fa24e65adbd610db23c8bb75f306c595d1e5e44e3b30f2a23644af4b30c628eb","src/regex.rs":"d8e2a6958d4ed8084867063aae4b5c77ffc5d271dc2e17909d56c5a5e1552034","src/string.rs":"26ede9ab41a2673c3ad6001bc1802c005ce9a4f190f55860a24aa66b6b71bbc7","tests/regexp_filter.rs":"a3f9c01623e90e54b247a62c53b25caf5f502d054f28c0bdf92abbea486a95b5"},"package":"e3856f1697098606fc6cb97a93de88ca3f3bc35bb878c725920e6e82ecf05e83"} \ No newline at end of file diff -Nru cargo-0.17.0/vendor/env_logger-0.4.2/Cargo.toml cargo-0.19.0/vendor/env_logger-0.4.2/Cargo.toml --- cargo-0.17.0/vendor/env_logger-0.4.2/Cargo.toml 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/env_logger-0.4.2/Cargo.toml 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,24 @@ +[package] +name = "env_logger" +version = "0.4.2" +authors = ["The Rust Project Developers"] +license = "MIT/Apache-2.0" +repository = "https://github.com/rust-lang/log" +documentation = "http://doc.rust-lang.org/log/env_logger" +homepage = "https://github.com/rust-lang/log" +description = """ +A logging implementation for `log` which is configured via an environment +variable. +""" +categories = ["development-tools::debugging"] + +[dependencies] +log = { version = "0.3", path = ".." 
} +regex = { version = "0.2", optional = true } + +[[test]] +name = "regexp_filter" +harness = false + +[features] +default = ["regex"] diff -Nru cargo-0.17.0/vendor/env_logger-0.4.2/LICENSE-APACHE cargo-0.19.0/vendor/env_logger-0.4.2/LICENSE-APACHE --- cargo-0.17.0/vendor/env_logger-0.4.2/LICENSE-APACHE 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/env_logger-0.4.2/LICENSE-APACHE 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + +2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + +4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ +Copyright [yyyy] [name of copyright owner] + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff -Nru cargo-0.17.0/vendor/env_logger-0.4.2/LICENSE-MIT cargo-0.19.0/vendor/env_logger-0.4.2/LICENSE-MIT --- cargo-0.17.0/vendor/env_logger-0.4.2/LICENSE-MIT 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/env_logger-0.4.2/LICENSE-MIT 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,25 @@ +Copyright (c) 2014 The Rust Project Developers + +Permission is hereby granted, free of charge, to any +person obtaining a copy of this software and associated +documentation files (the "Software"), to deal in the +Software without restriction, including without +limitation the rights to use, copy, modify, merge, +publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software +is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice +shall be included in all copies or substantial portions +of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF +ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED +TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A +PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR +IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. 
diff -Nru cargo-0.17.0/vendor/env_logger-0.4.2/src/lib.rs cargo-0.19.0/vendor/env_logger-0.4.2/src/lib.rs --- cargo-0.17.0/vendor/env_logger-0.4.2/src/lib.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/env_logger-0.4.2/src/lib.rs 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,628 @@ +// Copyright 2014-2015 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! A logger configured via an environment variable which writes to standard +//! error. +//! +//! ## Example +//! +//! ``` +//! #[macro_use] extern crate log; +//! extern crate env_logger; +//! +//! use log::LogLevel; +//! +//! fn main() { +//! env_logger::init().unwrap(); +//! +//! debug!("this is a debug {}", "message"); +//! error!("this is printed by default"); +//! +//! if log_enabled!(LogLevel::Info) { +//! let x = 3 * 4; // expensive computation +//! info!("the answer was: {}", x); +//! } +//! } +//! ``` +//! +//! Assumes the binary is `main`: +//! +//! ```{.bash} +//! $ RUST_LOG=error ./main +//! ERROR:main: this is printed by default +//! ``` +//! +//! ```{.bash} +//! $ RUST_LOG=info ./main +//! ERROR:main: this is printed by default +//! INFO:main: the answer was: 12 +//! ``` +//! +//! ```{.bash} +//! $ RUST_LOG=debug ./main +//! DEBUG:main: this is a debug message +//! ERROR:main: this is printed by default +//! INFO:main: the answer was: 12 +//! ``` +//! +//! You can also set the log level on a per module basis: +//! +//! ```{.bash} +//! $ RUST_LOG=main=info ./main +//! ERROR:main: this is printed by default +//! INFO:main: the answer was: 12 +//! ``` +//! +//! And enable all logging: +//! +//! ```{.bash} +//! $ RUST_LOG=main ./main +//! DEBUG:main: this is a debug message +//! 
ERROR:main: this is printed by default +//! INFO:main: the answer was: 12 +//! ``` +//! +//! See the documentation for the log crate for more information about its API. +//! +//! ## Enabling logging +//! +//! Log levels are controlled on a per-module basis, and by default all logging +//! is disabled except for `error!`. Logging is controlled via the `RUST_LOG` +//! environment variable. The value of this environment variable is a +//! comma-separated list of logging directives. A logging directive is of the +//! form: +//! +//! ```text +//! path::to::module=log_level +//! ``` +//! +//! The path to the module is rooted in the name of the crate it was compiled +//! for, so if your program is contained in a file `hello.rs`, for example, to +//! turn on logging for this file you would use a value of `RUST_LOG=hello`. +//! Furthermore, this path is a prefix-search, so all modules nested in the +//! specified module will also have logging enabled. +//! +//! The actual `log_level` is optional to specify. If omitted, all logging will +//! be enabled. If specified, it must be one of the strings `debug`, `error`, +//! `info`, `warn`, or `trace`. +//! +//! As the log level for a module is optional, the module to enable logging for +//! is also optional. If only a `log_level` is provided, then the global log +//! level for all modules is set to this value. +//! +//! Some examples of valid values of `RUST_LOG` are: +//! +//! * `hello` turns on all logging for the 'hello' module +//! * `info` turns on all info logging +//! * `hello=debug` turns on debug logging for 'hello' +//! * `hello,std::option` turns on hello, and std's option logging +//! * `error,hello=warn` turn on global error logging and also warn for hello +//! +//! ## Filtering results +//! +//! A RUST_LOG directive may include a regex filter. The syntax is to append `/` +//! followed by a regex. Each message is checked against the regex, and is only +//! logged if it matches. 
Note that the matching is done after formatting the +//! log string but before adding any logging meta-data. There is a single filter +//! for all modules. +//! +//! Some examples: +//! +//! * `hello/foo` turns on all logging for the 'hello' module where the log +//! message includes 'foo'. +//! * `info/f.o` turns on all info logging where the log message includes 'foo', +//! 'f1o', 'fao', etc. +//! * `hello=debug/foo*foo` turns on debug logging for 'hello' where the log +//! message includes 'foofoo' or 'fofoo' or 'fooooooofoo', etc. +//! * `error,hello=warn/[0-9] scopes` turn on global error logging and also +//! warn for hello. In both cases the log message must include a single digit +//! number followed by 'scopes'. + +#![doc(html_logo_url = "http://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png", + html_favicon_url = "http://www.rust-lang.org/favicon.ico", + html_root_url = "http://doc.rust-lang.org/env_logger/")] +#![cfg_attr(test, deny(warnings))] + +// When compiled for the rustc compiler itself we want to make sure that this is +// an unstable crate +#![cfg_attr(rustbuild, feature(staged_api, rustc_private))] +#![cfg_attr(rustbuild, unstable(feature = "rustc_private", issue = "27812"))] + +extern crate log; + +use std::env; +use std::io::prelude::*; +use std::io; +use std::mem; + +use log::{Log, LogLevel, LogLevelFilter, LogRecord, SetLoggerError, LogMetadata}; + +#[cfg(feature = "regex")] +#[path = "regex.rs"] +mod filter; + +#[cfg(not(feature = "regex"))] +#[path = "string.rs"] +mod filter; + +/// The logger. +pub struct Logger { + directives: Vec, + filter: Option, + format: Box String + Sync + Send>, +} + +/// LogBuilder acts as builder for initializing the Logger. +/// It can be used to customize the log format, change the enviromental variable used +/// to provide the logging directives and also set the default log level filter. 
+/// +/// ## Example +/// +/// ``` +/// #[macro_use] +/// extern crate log; +/// extern crate env_logger; +/// +/// use std::env; +/// use log::{LogRecord, LogLevelFilter}; +/// use env_logger::LogBuilder; +/// +/// fn main() { +/// let format = |record: &LogRecord| { +/// format!("{} - {}", record.level(), record.args()) +/// }; +/// +/// let mut builder = LogBuilder::new(); +/// builder.format(format).filter(None, LogLevelFilter::Info); +/// +/// if env::var("RUST_LOG").is_ok() { +/// builder.parse(&env::var("RUST_LOG").unwrap()); +/// } +/// +/// builder.init().unwrap(); +/// +/// error!("error message"); +/// info!("info message"); +/// } +/// ``` +pub struct LogBuilder { + directives: Vec, + filter: Option, + format: Box String + Sync + Send>, +} + +impl LogBuilder { + /// Initializes the log builder with defaults + pub fn new() -> LogBuilder { + LogBuilder { + directives: Vec::new(), + filter: None, + format: Box::new(|record: &LogRecord| { + format!("{}:{}: {}", record.level(), + record.location().module_path(), record.args()) + }), + } + } + + /// Adds filters to the logger + /// + /// The given module (if any) will log at most the specified level provided. + /// If no module is provided then the filter will apply to all log messages. + pub fn filter(&mut self, + module: Option<&str>, + level: LogLevelFilter) -> &mut Self { + self.directives.push(LogDirective { + name: module.map(|s| s.to_string()), + level: level, + }); + self + } + + /// Sets the format function for formatting the log output. + /// + /// This function is called on each record logged to produce a string which + /// is actually printed out. + pub fn format(&mut self, format: F) -> &mut Self + where F: Fn(&LogRecord) -> String + Sync + Send + { + self.format = Box::new(format); + self + } + + /// Parses the directives string in the same form as the RUST_LOG + /// environment variable. + /// + /// See the module documentation for more details. 
+ pub fn parse(&mut self, filters: &str) -> &mut Self { + let (directives, filter) = parse_logging_spec(filters); + + self.filter = filter; + + for directive in directives { + self.directives.push(directive); + } + self + } + + /// Initializes the global logger with an env logger. + /// + /// This should be called early in the execution of a Rust program, and the + /// global logger may only be initialized once. Future initialization + /// attempts will return an error. + pub fn init(&mut self) -> Result<(), SetLoggerError> { + log::set_logger(|max_level| { + let logger = self.build(); + max_level.set(logger.filter()); + Box::new(logger) + }) + } + + /// Build an env logger. + pub fn build(&mut self) -> Logger { + if self.directives.is_empty() { + // Adds the default filter if none exist + self.directives.push(LogDirective { + name: None, + level: LogLevelFilter::Error, + }); + } else { + // Sort the directives by length of their name, this allows a + // little more efficient lookup at runtime. + self.directives.sort_by(|a, b| { + let alen = a.name.as_ref().map(|a| a.len()).unwrap_or(0); + let blen = b.name.as_ref().map(|b| b.len()).unwrap_or(0); + alen.cmp(&blen) + }); + } + + Logger { + directives: mem::replace(&mut self.directives, Vec::new()), + filter: mem::replace(&mut self.filter, None), + format: mem::replace(&mut self.format, Box::new(|_| String::new())), + } + } +} + +impl Logger { + pub fn new() -> Logger { + let mut builder = LogBuilder::new(); + + if let Ok(s) = env::var("RUST_LOG") { + builder.parse(&s); + } + + builder.build() + } + + pub fn filter(&self) -> LogLevelFilter { + self.directives.iter() + .map(|d| d.level).max() + .unwrap_or(LogLevelFilter::Off) + } + + fn enabled(&self, level: LogLevel, target: &str) -> bool { + // Search for the longest match, the vector is assumed to be pre-sorted. + for directive in self.directives.iter().rev() { + match directive.name { + Some(ref name) if !target.starts_with(&**name) => {}, + Some(..) 
| None => { + return level <= directive.level + } + } + } + false + } +} + +impl Log for Logger { + fn enabled(&self, metadata: &LogMetadata) -> bool { + self.enabled(metadata.level(), metadata.target()) + } + + fn log(&self, record: &LogRecord) { + if !Log::enabled(self, record.metadata()) { + return; + } + + if let Some(filter) = self.filter.as_ref() { + if !filter.is_match(&*record.args().to_string()) { + return; + } + } + + let _ = writeln!(&mut io::stderr(), "{}", (self.format)(record)); + } +} + +struct LogDirective { + name: Option, + level: LogLevelFilter, +} + +/// Initializes the global logger with an env logger. +/// +/// This should be called early in the execution of a Rust program, and the +/// global logger may only be initialized once. Future initialization attempts +/// will return an error. +pub fn init() -> Result<(), SetLoggerError> { + let mut builder = LogBuilder::new(); + + if let Ok(s) = env::var("RUST_LOG") { + builder.parse(&s); + } + + builder.init() +} + +/// Parse a logging specification string (e.g: "crate1,crate2::mod3,crate3::x=error/foo") +/// and return a vector with log directives. 
+fn parse_logging_spec(spec: &str) -> (Vec, Option) { + let mut dirs = Vec::new(); + + let mut parts = spec.split('/'); + let mods = parts.next(); + let filter = parts.next(); + if parts.next().is_some() { + println!("warning: invalid logging spec '{}', \ + ignoring it (too many '/'s)", spec); + return (dirs, None); + } + mods.map(|m| { for s in m.split(',') { + if s.len() == 0 { continue } + let mut parts = s.split('='); + let (log_level, name) = match (parts.next(), parts.next().map(|s| s.trim()), parts.next()) { + (Some(part0), None, None) => { + // if the single argument is a log-level string or number, + // treat that as a global fallback + match part0.parse() { + Ok(num) => (num, None), + Err(_) => (LogLevelFilter::max(), Some(part0)), + } + } + (Some(part0), Some(""), None) => (LogLevelFilter::max(), Some(part0)), + (Some(part0), Some(part1), None) => { + match part1.parse() { + Ok(num) => (num, Some(part0)), + _ => { + println!("warning: invalid logging spec '{}', \ + ignoring it", part1); + continue + } + } + }, + _ => { + println!("warning: invalid logging spec '{}', \ + ignoring it", s); + continue + } + }; + dirs.push(LogDirective { + name: name.map(|s| s.to_string()), + level: log_level, + }); + }}); + + let filter = filter.map_or(None, |filter| { + match filter::Filter::new(filter) { + Ok(re) => Some(re), + Err(e) => { + println!("warning: invalid regex filter - {}", e); + None + } + } + }); + + return (dirs, filter); +} + +#[cfg(test)] +mod tests { + use log::{LogLevel, LogLevelFilter}; + + use super::{LogBuilder, Logger, LogDirective, parse_logging_spec}; + + fn make_logger(dirs: Vec) -> Logger { + let mut logger = LogBuilder::new().build(); + logger.directives = dirs; + logger + } + + #[test] + fn filter_info() { + let logger = LogBuilder::new().filter(None, LogLevelFilter::Info).build(); + assert!(logger.enabled(LogLevel::Info, "crate1")); + assert!(!logger.enabled(LogLevel::Debug, "crate1")); + } + + #[test] + fn filter_beginning_longest_match() 
{ + let logger = LogBuilder::new() + .filter(Some("crate2"), LogLevelFilter::Info) + .filter(Some("crate2::mod"), LogLevelFilter::Debug) + .filter(Some("crate1::mod1"), LogLevelFilter::Warn) + .build(); + assert!(logger.enabled(LogLevel::Debug, "crate2::mod1")); + assert!(!logger.enabled(LogLevel::Debug, "crate2")); + } + + #[test] + fn parse_default() { + let logger = LogBuilder::new().parse("info,crate1::mod1=warn").build(); + assert!(logger.enabled(LogLevel::Warn, "crate1::mod1")); + assert!(logger.enabled(LogLevel::Info, "crate2::mod2")); + } + + #[test] + fn match_full_path() { + let logger = make_logger(vec![ + LogDirective { + name: Some("crate2".to_string()), + level: LogLevelFilter::Info + }, + LogDirective { + name: Some("crate1::mod1".to_string()), + level: LogLevelFilter::Warn + } + ]); + assert!(logger.enabled(LogLevel::Warn, "crate1::mod1")); + assert!(!logger.enabled(LogLevel::Info, "crate1::mod1")); + assert!(logger.enabled(LogLevel::Info, "crate2")); + assert!(!logger.enabled(LogLevel::Debug, "crate2")); + } + + #[test] + fn no_match() { + let logger = make_logger(vec![ + LogDirective { name: Some("crate2".to_string()), level: LogLevelFilter::Info }, + LogDirective { name: Some("crate1::mod1".to_string()), level: LogLevelFilter::Warn } + ]); + assert!(!logger.enabled(LogLevel::Warn, "crate3")); + } + + #[test] + fn match_beginning() { + let logger = make_logger(vec![ + LogDirective { name: Some("crate2".to_string()), level: LogLevelFilter::Info }, + LogDirective { name: Some("crate1::mod1".to_string()), level: LogLevelFilter::Warn } + ]); + assert!(logger.enabled(LogLevel::Info, "crate2::mod1")); + } + + #[test] + fn match_beginning_longest_match() { + let logger = make_logger(vec![ + LogDirective { name: Some("crate2".to_string()), level: LogLevelFilter::Info }, + LogDirective { name: Some("crate2::mod".to_string()), level: LogLevelFilter::Debug }, + LogDirective { name: Some("crate1::mod1".to_string()), level: LogLevelFilter::Warn } + ]); + 
assert!(logger.enabled(LogLevel::Debug, "crate2::mod1")); + assert!(!logger.enabled(LogLevel::Debug, "crate2")); + } + + #[test] + fn match_default() { + let logger = make_logger(vec![ + LogDirective { name: None, level: LogLevelFilter::Info }, + LogDirective { name: Some("crate1::mod1".to_string()), level: LogLevelFilter::Warn } + ]); + assert!(logger.enabled(LogLevel::Warn, "crate1::mod1")); + assert!(logger.enabled(LogLevel::Info, "crate2::mod2")); + } + + #[test] + fn zero_level() { + let logger = make_logger(vec![ + LogDirective { name: None, level: LogLevelFilter::Info }, + LogDirective { name: Some("crate1::mod1".to_string()), level: LogLevelFilter::Off } + ]); + assert!(!logger.enabled(LogLevel::Error, "crate1::mod1")); + assert!(logger.enabled(LogLevel::Info, "crate2::mod2")); + } + + #[test] + fn parse_logging_spec_valid() { + let (dirs, filter) = parse_logging_spec("crate1::mod1=error,crate1::mod2,crate2=debug"); + assert_eq!(dirs.len(), 3); + assert_eq!(dirs[0].name, Some("crate1::mod1".to_string())); + assert_eq!(dirs[0].level, LogLevelFilter::Error); + + assert_eq!(dirs[1].name, Some("crate1::mod2".to_string())); + assert_eq!(dirs[1].level, LogLevelFilter::max()); + + assert_eq!(dirs[2].name, Some("crate2".to_string())); + assert_eq!(dirs[2].level, LogLevelFilter::Debug); + assert!(filter.is_none()); + } + + #[test] + fn parse_logging_spec_invalid_crate() { + // test parse_logging_spec with multiple = in specification + let (dirs, filter) = parse_logging_spec("crate1::mod1=warn=info,crate2=debug"); + assert_eq!(dirs.len(), 1); + assert_eq!(dirs[0].name, Some("crate2".to_string())); + assert_eq!(dirs[0].level, LogLevelFilter::Debug); + assert!(filter.is_none()); + } + + #[test] + fn parse_logging_spec_invalid_log_level() { + // test parse_logging_spec with 'noNumber' as log level + let (dirs, filter) = parse_logging_spec("crate1::mod1=noNumber,crate2=debug"); + assert_eq!(dirs.len(), 1); + assert_eq!(dirs[0].name, Some("crate2".to_string())); + 
assert_eq!(dirs[0].level, LogLevelFilter::Debug); + assert!(filter.is_none()); + } + + #[test] + fn parse_logging_spec_string_log_level() { + // test parse_logging_spec with 'warn' as log level + let (dirs, filter) = parse_logging_spec("crate1::mod1=wrong,crate2=warn"); + assert_eq!(dirs.len(), 1); + assert_eq!(dirs[0].name, Some("crate2".to_string())); + assert_eq!(dirs[0].level, LogLevelFilter::Warn); + assert!(filter.is_none()); + } + + #[test] + fn parse_logging_spec_empty_log_level() { + // test parse_logging_spec with '' as log level + let (dirs, filter) = parse_logging_spec("crate1::mod1=wrong,crate2="); + assert_eq!(dirs.len(), 1); + assert_eq!(dirs[0].name, Some("crate2".to_string())); + assert_eq!(dirs[0].level, LogLevelFilter::max()); + assert!(filter.is_none()); + } + + #[test] + fn parse_logging_spec_global() { + // test parse_logging_spec with no crate + let (dirs, filter) = parse_logging_spec("warn,crate2=debug"); + assert_eq!(dirs.len(), 2); + assert_eq!(dirs[0].name, None); + assert_eq!(dirs[0].level, LogLevelFilter::Warn); + assert_eq!(dirs[1].name, Some("crate2".to_string())); + assert_eq!(dirs[1].level, LogLevelFilter::Debug); + assert!(filter.is_none()); + } + + #[test] + fn parse_logging_spec_valid_filter() { + let (dirs, filter) = parse_logging_spec("crate1::mod1=error,crate1::mod2,crate2=debug/abc"); + assert_eq!(dirs.len(), 3); + assert_eq!(dirs[0].name, Some("crate1::mod1".to_string())); + assert_eq!(dirs[0].level, LogLevelFilter::Error); + + assert_eq!(dirs[1].name, Some("crate1::mod2".to_string())); + assert_eq!(dirs[1].level, LogLevelFilter::max()); + + assert_eq!(dirs[2].name, Some("crate2".to_string())); + assert_eq!(dirs[2].level, LogLevelFilter::Debug); + assert!(filter.is_some() && filter.unwrap().to_string() == "abc"); + } + + #[test] + fn parse_logging_spec_invalid_crate_filter() { + let (dirs, filter) = parse_logging_spec("crate1::mod1=error=warn,crate2=debug/a.c"); + assert_eq!(dirs.len(), 1); + assert_eq!(dirs[0].name, 
Some("crate2".to_string())); + assert_eq!(dirs[0].level, LogLevelFilter::Debug); + assert!(filter.is_some() && filter.unwrap().to_string() == "a.c"); + } + + #[test] + fn parse_logging_spec_empty_with_filter() { + let (dirs, filter) = parse_logging_spec("crate1/a*c"); + assert_eq!(dirs.len(), 1); + assert_eq!(dirs[0].name, Some("crate1".to_string())); + assert_eq!(dirs[0].level, LogLevelFilter::max()); + assert!(filter.is_some() && filter.unwrap().to_string() == "a*c"); + } +} diff -Nru cargo-0.17.0/vendor/env_logger-0.4.2/src/regex.rs cargo-0.19.0/vendor/env_logger-0.4.2/src/regex.rs --- cargo-0.17.0/vendor/env_logger-0.4.2/src/regex.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/env_logger-0.4.2/src/regex.rs 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,28 @@ +extern crate regex; + +use std::fmt; + +use self::regex::Regex; + +pub struct Filter { + inner: Regex, +} + +impl Filter { + pub fn new(spec: &str) -> Result { + match Regex::new(spec){ + Ok(r) => Ok(Filter { inner: r }), + Err(e) => Err(e.to_string()), + } + } + + pub fn is_match(&self, s: &str) -> bool { + self.inner.is_match(s) + } +} + +impl fmt::Display for Filter { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + self.inner.fmt(f) + } +} diff -Nru cargo-0.17.0/vendor/env_logger-0.4.2/src/string.rs cargo-0.19.0/vendor/env_logger-0.4.2/src/string.rs --- cargo-0.17.0/vendor/env_logger-0.4.2/src/string.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/env_logger-0.4.2/src/string.rs 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,21 @@ +use std::fmt; + +pub struct Filter { + inner: String, +} + +impl Filter { + pub fn new(spec: &str) -> Result { + Ok(Filter { inner: spec.to_string() }) + } + + pub fn is_match(&self, s: &str) -> bool { + s.contains(&self.inner) + } +} + +impl fmt::Display for Filter { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + self.inner.fmt(f) + } +} diff -Nru cargo-0.17.0/vendor/env_logger-0.4.2/tests/regexp_filter.rs 
cargo-0.19.0/vendor/env_logger-0.4.2/tests/regexp_filter.rs --- cargo-0.17.0/vendor/env_logger-0.4.2/tests/regexp_filter.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/env_logger-0.4.2/tests/regexp_filter.rs 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,51 @@ +#[macro_use] extern crate log; +extern crate env_logger; + +use std::process; +use std::env; +use std::str; + +fn main() { + if env::var("LOG_REGEXP_TEST").ok() == Some(String::from("1")) { + child_main(); + } else { + parent_main() + } +} + +fn child_main() { + env_logger::init().unwrap(); + info!("XYZ Message"); +} + +fn run_child(rust_log: String) -> bool { + let exe = env::current_exe().unwrap(); + let out = process::Command::new(exe) + .env("LOG_REGEXP_TEST", "1") + .env("RUST_LOG", rust_log) + .output() + .unwrap_or_else(|e| panic!("Unable to start child process: {}", e)); + str::from_utf8(out.stderr.as_ref()).unwrap().contains("XYZ Message") +} + +fn assert_message_printed(rust_log: &str) { + if !run_child(rust_log.to_string()) { + panic!("RUST_LOG={} should allow the test log message", rust_log) + } +} + +fn assert_message_not_printed(rust_log: &str) { + if run_child(rust_log.to_string()) { + panic!("RUST_LOG={} should not allow the test log message", rust_log) + } +} + +fn parent_main() { + // test normal log severity levels + assert_message_printed("info"); + assert_message_not_printed("warn"); + + // test of regular expression filters + assert_message_printed("info/XYZ"); + assert_message_not_printed("info/XXX"); +} diff -Nru cargo-0.17.0/vendor/error-chain-0.7.2/.cargo-checksum.json cargo-0.19.0/vendor/error-chain-0.7.2/.cargo-checksum.json --- cargo-0.17.0/vendor/error-chain-0.7.2/.cargo-checksum.json 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/error-chain-0.7.2/.cargo-checksum.json 1970-01-01 00:00:00.000000000 +0000 @@ -1 +0,0 @@ 
-{"files":{".cargo-ok":"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",".gitignore":"ed8bb3864902ddf6322e6b1d6358bcaec9b51382a5216b9526ad0987ae430b0d",".travis.yml":"d56246d6c8796c638b5012c2d7a91d9b6ec101b6a47128e2d4bfa957c1c784e8","CHANGELOG.md":"8eb613d4a417752d4d1c81e065853e5ba0a21530e0881886c2ae4ffbf0ce57cd","Cargo.toml":"9e551bbef17e031db548e1a81f52d249c94db73a194daf0fe2906bc9404b9805","README.md":"6771ca940645b2f7e7a018c8cd25b25f8bf35786e229b54fa2fded1f2d0ae411","examples/all.rs":"2e6d530e95907bde1e49bda7fde7167568d08a370ade44a153612e2d1cb899d7","examples/doc.rs":"574948eb776c3d363f5cff9a48015bab6c17828c7306dc3eb8818afa90a31a83","examples/quickstart.rs":"0cd227741ed3559c0ead90dcc643cef30b73255d9c9f15c2ee20c4a1085d6f5c","examples/size.rs":"7922acd891dfd06f1d36308a3ccdf03def2646b2f39bfd1b15cf2896247bad8f","src/error_chain.rs":"236c4feead97661b33541434ae71f32c279738a81d0d4b7ce9c50550d5d6a662","src/example_generated.rs":"edaead3c4911afd0a0870cfcab11f8835eb17447031d227bbb5d17210379f778","src/lib.rs":"14ce5d1e76185e762db2414b51411095ddd38207a6f4d9dd50d4a041e7b77d88","src/quick_error.rs":"1889b9ca1f7a5e9124275fd5da81e709d0d6bd3b06915bf320c23d4c4f083301","src/quick_main.rs":"755028c2b4305482a1ab86f8b1b68a95eac22b331c94e14d29777dc69dad1bf4","tests/quick_main.rs":"1d6a726856b954d4cffddab00602583921972ceeeb2bf7ba9ebbac6a51584b53","tests/tests.rs":"2f7ceee2f9808d0985c848d99fe967e8f0b549cf144d4d692a5c5d1c2ba7d660"},"package":"318cb3c71ee4cdea69fdc9e15c173b245ed6063e1709029e8fd32525a881120f"} \ No newline at end of file diff -Nru cargo-0.17.0/vendor/error-chain-0.7.2/Cargo.toml cargo-0.19.0/vendor/error-chain-0.7.2/Cargo.toml --- cargo-0.17.0/vendor/error-chain-0.7.2/Cargo.toml 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/error-chain-0.7.2/Cargo.toml 1970-01-01 00:00:00.000000000 +0000 @@ -1,22 +0,0 @@ -[package] - -name = "error-chain" -version = "0.7.2" -authors = [ "Brian Anderson ", - "Paul Colomiets ", - "Colin Kiegel ", - "Yamakaky 
"] -description = "Yet another error boilerplate library." - -documentation = "https://docs.rs/error-chain" -homepage = "https://github.com/brson/error-chain" -repository = "https://github.com/brson/error-chain" - -license = "MIT/Apache-2.0" - -[features] -default = ["backtrace", "example_generated"] -example_generated = [] - -[dependencies] -backtrace = { version = "0.3", optional = true } diff -Nru cargo-0.17.0/vendor/error-chain-0.7.2/CHANGELOG.md cargo-0.19.0/vendor/error-chain-0.7.2/CHANGELOG.md --- cargo-0.17.0/vendor/error-chain-0.7.2/CHANGELOG.md 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/error-chain-0.7.2/CHANGELOG.md 1970-01-01 00:00:00.000000000 +0000 @@ -1,82 +0,0 @@ -# Unreleased - -# 0.7.2 - -- Add `quick_main!` (#88). -- `allow(unused)` for the `Result` wrapper. -- Minimum rust version supported is not 1.10 on some conditions (#103). - -# 0.7.1 - -- [Add the `bail!` macro](https://github.com/brson/error-chain/pull/76) - -# 0.7.0 - -- [Rollback several design changes to fix regressions](https://github.com/brson/error-chain/pull/75) -- New `Variant(Error) #[attrs]` for `links` and `foreign_links`. -- Hide implementation details from the doc. -- Always generate `Error::backtrace`. - -# 0.6.2 - -- Allow dead code. - -# 0.6.1 - -- Fix wrong trait constraint in ResultExt implementation (#66). - -# 0.6.0 - -- Conditional compilation for error variants. -- Backtrace generation is now a feature. -- More standard trait implementations for extra convenience. -- Remove ChainErr. -- Remove need to specify `ErrorKind` in `links {}`. -- Add ResultExt trait. -- Error.1 is a struct instead of a tuple. -- Error is now a struct. -- The declarations order is more flexible. -- Way better error reporting when there is a syntax error in the macro call. -- `Result` generation can be disabled. -- At most one declaration of each type can be present. 
- -# 0.5.0 - -- [Only generate backtraces with RUST_BACKTRACE set](https://github.com/brson/error-chain/pull/27) -- [Fixup matching, disallow repeating "types" section](https://github.com/brson/error-chain/pull/26) -- [Fix tests on stable/beta](https://github.com/brson/error-chain/pull/28) -- [Only deploy docs when tagged](https://github.com/brson/error-chain/pull/30) - -Contributors: benaryorg, Brian Anderson, Georg Brandl - -# 0.4.2 - -- [Fix the resolution of the ErrorKind description method](https://github.com/brson/error-chain/pull/24) - -Contributors: Brian Anderson - -# 0.4.1 (yanked) - -- [Fix a problem with resolving methods of the standard Error type](https://github.com/brson/error-chain/pull/22) - -Contributors: Brian Anderson - -# 0.4.0 (yanked) - -- [Remove the foreign link description and forward to the foreign error](https://github.com/brson/error-chain/pull/19) -- [Allow missing sections](https://github.com/brson/error-chain/pull/17) - -Contributors: Brian Anderson, Taylor Cramer - -# 0.3.0 - -- [Forward Display implementation for foreign errors](https://github.com/brson/error-chain/pull/13) - -Contributors: Brian Anderson, Taylor Cramer - -# 0.2.2 - -- [Don't require `types` section in macro invocation](https://github.com/brson/error-chain/pull/8) -- [Add "quick start" to README](https://github.com/brson/error-chain/pull/9) - -Contributors: Brian Anderson, Jake Shadle, Nate Mara diff -Nru cargo-0.17.0/vendor/error-chain-0.7.2/examples/all.rs cargo-0.19.0/vendor/error-chain-0.7.2/examples/all.rs --- cargo-0.17.0/vendor/error-chain-0.7.2/examples/all.rs 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/error-chain-0.7.2/examples/all.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,36 +0,0 @@ -#[macro_use] -extern crate error_chain; - -pub mod inner { - error_chain! {} -} - -#[cfg(feature = "a_feature")] -pub mod feature { - error_chain! {} -} - -error_chain! { - // Types generated by the macro. 
If empty of absent, it defaults to - // Error, ErrorKind, Result; - types { - // With custom names: - MyError, MyErrorKind, MyResult; - // Without the `Result` wrapper: - // Error, ErrorKind; - } - - // Automatic bindings to others error types generated by `error_chain!`. - links { - Inner(inner::Error, inner::ErrorKind); - // Attributes can be added at the end of the declaration. - Feature(feature::Error, feature::ErrorKind) #[cfg(feature = "a_feature")]; - } - - // Bindings to types implementing std::error::Error. - foreign_links { - Io(::std::io::Error); - } -} - -fn main() {} diff -Nru cargo-0.17.0/vendor/error-chain-0.7.2/examples/doc.rs cargo-0.19.0/vendor/error-chain-0.7.2/examples/doc.rs --- cargo-0.17.0/vendor/error-chain-0.7.2/examples/doc.rs 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/error-chain-0.7.2/examples/doc.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,29 +0,0 @@ -#![deny(missing_docs)] - -//! This module is used to check that all generated items are documented. - -#[macro_use] -extern crate error_chain; - -/// Inner module. -pub mod inner { - error_chain! { - } -} - -error_chain! { - links { - Inner(inner::Error, inner::ErrorKind) #[doc = "Doc"]; - } - foreign_links { - Io(::std::io::Error) #[doc = "Io"]; - } - errors { - /// Doc - Test2 { - - } - } -} - -fn main() {} diff -Nru cargo-0.17.0/vendor/error-chain-0.7.2/examples/quickstart.rs cargo-0.19.0/vendor/error-chain-0.7.2/examples/quickstart.rs --- cargo-0.17.0/vendor/error-chain-0.7.2/examples/quickstart.rs 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/error-chain-0.7.2/examples/quickstart.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,61 +0,0 @@ -// Simple and robust error handling with error-chain! -// Use this as a template for new projects. - -// `error_chain!` can recurse deeply -#![recursion_limit = "1024"] - -// Import the macro. Don't forget to add `error-chain` in your -// `Cargo.toml`! 
-#[macro_use] -extern crate error_chain; - -// We'll put our errors in an `errors` module, and other modules in -// this crate will `use errors::*;` to get access to everything -// `error_chain!` creates. -mod errors { - // Create the Error, ErrorKind, ResultExt, and Result types - error_chain! { } -} - -use errors::*; - -fn main() { - if let Err(ref e) = run() { - use ::std::io::Write; - let stderr = &mut ::std::io::stderr(); - let errmsg = "Error writing to stderr"; - - writeln!(stderr, "error: {}", e).expect(errmsg); - - for e in e.iter().skip(1) { - writeln!(stderr, "caused by: {}", e).expect(errmsg); - } - - // The backtrace is not always generated. Try to run this example - // with `RUST_BACKTRACE=1`. - if let Some(backtrace) = e.backtrace() { - writeln!(stderr, "backtrace: {:?}", backtrace).expect(errmsg); - } - - ::std::process::exit(1); - } -} - -// Use this macro to auto-generate the main above. You may want to -// set the `RUST_BACKTRACE` env variable to see a backtrace. -//quick_main!(run); - - -// Most functions will return the `Result` type, imported from the -// `errors` module. It is a typedef of the standard `Result` type -// for which the error type is always our own `Error`. -fn run() -> Result<()> { - use std::fs::File; - - // This operation will fail - File::open("tretrete") - .chain_err(|| "unable to open tretrete file")?; - - Ok(()) -} - diff -Nru cargo-0.17.0/vendor/error-chain-0.7.2/examples/size.rs cargo-0.19.0/vendor/error-chain-0.7.2/examples/size.rs --- cargo-0.17.0/vendor/error-chain-0.7.2/examples/size.rs 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/error-chain-0.7.2/examples/size.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,40 +0,0 @@ -#[macro_use] -extern crate error_chain; - -use std::mem::{size_of, size_of_val}; - -error_chain! 
{ - errors { - AVariant - Another - } -} - -fn main() { - println!("Memory usage in bytes"); - println!("---------------------"); - println!("Result<()>: {}", size_of::>()); - println!(" (): {}", size_of::<()>()); - println!(" Error: {}", size_of::()); - println!(" ErrorKind: {}", size_of::()); - let msg = ErrorKind::Msg("test".into()); - println!(" ErrorKind::Msg: {}", size_of_val(&msg)); - println!(" String: {}", size_of::()); - println!(" State: {}", size_of::()); - #[cfg(feature = "backtrace")] - { - let state = error_chain::State { - next_error: None, - backtrace: None, - }; - println!(" State.next_error: {}", size_of_val(&state.next_error)); - println!(" State.backtrace: {}", size_of_val(&state.backtrace)); - } - #[cfg(not(feature = "backtrace"))] - { - let state = error_chain::State { - next_error: None, - }; - println!(" State.next_error: {}", size_of_val(&state.next_error)); - } -} diff -Nru cargo-0.17.0/vendor/error-chain-0.7.2/.gitignore cargo-0.19.0/vendor/error-chain-0.7.2/.gitignore --- cargo-0.17.0/vendor/error-chain-0.7.2/.gitignore 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/error-chain-0.7.2/.gitignore 1970-01-01 00:00:00.000000000 +0000 @@ -1,3 +0,0 @@ -*~ -target/ -Cargo.lock \ No newline at end of file diff -Nru cargo-0.17.0/vendor/error-chain-0.7.2/README.md cargo-0.19.0/vendor/error-chain-0.7.2/README.md --- cargo-0.17.0/vendor/error-chain-0.7.2/README.md 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/error-chain-0.7.2/README.md 1970-01-01 00:00:00.000000000 +0000 @@ -1,36 +0,0 @@ -# error-chain - Consistent error handling for Rust - -[![Build Status](https://api.travis-ci.org/brson/error-chain.svg?branch=master)](https://travis-ci.org/brson/error-chain) -[![Latest Version](https://img.shields.io/crates/v/error-chain.svg)](https://crates.io/crates/error-chain) -[![License](https://img.shields.io/github/license/brson/error-chain.svg)](https://github.com/brson/error-chain) - -`error-chain` makes it easy to take full 
advantage of Rust's error -handling features without the overhead of maintaining boilerplate -error types and conversions. It implements an opinionated strategy for -defining your own error types, as well as conversions from others' -error types. - -[Documentation (crates.io)](https://docs.rs/error-chain). - -[Documentation (master)](https://brson.github.io/error-chain). - -## Quick start - -If you just want to set up your new project with error-chain, -follow the [quickstart.rs] template, and read this [intro] -to error-chain. - -[quickstart.rs]: https://github.com/brson/error-chain/blob/master/examples/quickstart.rs -[intro]: http://brson.github.io/2016/11/30/starting-with-error-chain - -## Supported Rust version - -Please view the beginning of the [Travis configuration file](.travis.yml) -to see the oldest supported Rust version. - -Note that `error-chain` supports older versions of Rust when built with -`default-features = false`. - -## License - -MIT/Apache-2.0 diff -Nru cargo-0.17.0/vendor/error-chain-0.7.2/src/error_chain.rs cargo-0.19.0/vendor/error-chain-0.7.2/src/error_chain.rs --- cargo-0.17.0/vendor/error-chain-0.7.2/src/error_chain.rs 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/error-chain-0.7.2/src/error_chain.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,405 +0,0 @@ -/// Prefer to use `error_chain` instead of this macro. -#[macro_export] -macro_rules! error_chain_processed { - // Default values for `types`. - ( - types {} - $( $rest: tt )* - ) => { - error_chain_processed! { - types { - Error, ErrorKind, ResultExt, Result; - } - $( $rest )* - } - }; - // With `Result` wrapper. - ( - types { - $error_name:ident, $error_kind_name:ident, - $result_ext_name:ident, $result_name:ident; - } - $( $rest: tt )* - ) => { - error_chain_processed! { - types { - $error_name, $error_kind_name, - $result_ext_name; - } - $( $rest )* - } - /// Convenient wrapper around `std::Result`. 
- #[allow(unused)] - pub type $result_name = ::std::result::Result; - }; - // Without `Result` wrapper. - ( - types { - $error_name:ident, $error_kind_name:ident, - $result_ext_name:ident; - } - - links { - $( $link_variant:ident ( $link_error_path:path, $link_kind_path:path ) - $( #[$meta_links:meta] )*; ) * - } - - foreign_links { - $( $foreign_link_variant:ident ( $foreign_link_error_path:path ) - $( #[$meta_foreign_links:meta] )*; )* - } - - errors { - $( $error_chunks:tt ) * - } - - ) => { - /// The Error type. - /// - /// This struct is made of three things: - /// - /// - an `ErrorKind` which is used to determine the type of the error. - /// - a backtrace, generated when the error is created. - /// - an error chain, used for the implementation of `Error::cause()`. - #[derive(Debug)] - pub struct $error_name( - // The members must be `pub` for `links`. - /// The kind of the error. - #[doc(hidden)] - pub $error_kind_name, - /// Contains the error chain and the backtrace. - #[doc(hidden)] - pub $crate::State, - ); - - impl $crate::ChainedError for $error_name { - type ErrorKind = $error_kind_name; - - fn new(kind: $error_kind_name, state: $crate::State) -> $error_name { - $error_name(kind, state) - } - - fn from_kind(kind: Self::ErrorKind) -> Self { - Self::from_kind(kind) - } - - fn kind(&self) -> &Self::ErrorKind { - self.kind() - } - - fn iter(&self) -> $crate::ErrorChainIter { - $crate::ErrorChainIter(Some(self)) - } - - fn backtrace(&self) -> Option<&$crate::Backtrace> { - self.backtrace() - } - - impl_extract_backtrace!($error_name - $error_kind_name - $([$link_error_path, $(#[$meta_links])*])*); - } - - #[allow(dead_code)] - impl $error_name { - /// Constructs an error from a kind, and generates a backtrace. - pub fn from_kind(kind: $error_kind_name) -> $error_name { - $error_name( - kind, - $crate::State::default(), - ) - } - - /// Returns the kind of the error. 
- pub fn kind(&self) -> &$error_kind_name { - &self.0 - } - - /// Iterates over the error chain. - pub fn iter(&self) -> $crate::ErrorChainIter { - $crate::ChainedError::iter(self) - } - - /// Returns the backtrace associated with this error. - pub fn backtrace(&self) -> Option<&$crate::Backtrace> { - self.1.backtrace() - } - } - - impl ::std::error::Error for $error_name { - fn description(&self) -> &str { - self.0.description() - } - - fn cause(&self) -> Option<&::std::error::Error> { - match self.1.next_error { - Some(ref c) => Some(&**c), - None => { - match self.0 { - $( - $(#[$meta_foreign_links])* - $error_kind_name::$foreign_link_variant(ref foreign_err) => { - foreign_err.cause() - } - ) * - _ => None - } - } - } - } - } - - impl ::std::fmt::Display for $error_name { - fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result { - ::std::fmt::Display::fmt(&self.0, f) - } - } - - $( - $(#[$meta_links])* - impl From<$link_error_path> for $error_name { - fn from(e: $link_error_path) -> Self { - $error_name( - $error_kind_name::$link_variant(e.0), - e.1, - ) - } - } - ) * - - $( - $(#[$meta_foreign_links])* - impl From<$foreign_link_error_path> for $error_name { - fn from(e: $foreign_link_error_path) -> Self { - $error_name::from_kind( - $error_kind_name::$foreign_link_variant(e) - ) - } - } - ) * - - impl From<$error_kind_name> for $error_name { - fn from(e: $error_kind_name) -> Self { - $error_name::from_kind(e) - } - } - - impl<'a> From<&'a str> for $error_name { - fn from(s: &'a str) -> Self { - $error_name::from_kind(s.into()) - } - } - - impl From for $error_name { - fn from(s: String) -> Self { - $error_name::from_kind(s.into()) - } - } - - impl ::std::ops::Deref for $error_name { - type Target = $error_kind_name; - - fn deref(&self) -> &Self::Target { - &self.0 - } - } - - - // The ErrorKind type - // -------------- - - quick_error! { - /// The kind of an error. 
- #[derive(Debug)] - pub enum $error_kind_name { - - /// A convenient variant for String. - Msg(s: String) { - description(&s) - display("{}", s) - } - - $( - $(#[$meta_links])* - $link_variant(e: $link_kind_path) { - description(e.description()) - display("{}", e) - } - ) * - - $( - $(#[$meta_foreign_links])* - $foreign_link_variant(err: $foreign_link_error_path) { - description(::std::error::Error::description(err)) - display("{}", err) - } - ) * - - $($error_chunks)* - } - } - - $( - $(#[$meta_links])* - impl From<$link_kind_path> for $error_kind_name { - fn from(e: $link_kind_path) -> Self { - $error_kind_name::$link_variant(e) - } - } - ) * - - impl<'a> From<&'a str> for $error_kind_name { - fn from(s: &'a str) -> Self { - $error_kind_name::Msg(s.to_string()) - } - } - - impl From for $error_kind_name { - fn from(s: String) -> Self { - $error_kind_name::Msg(s) - } - } - - impl From<$error_name> for $error_kind_name { - fn from(e: $error_name) -> Self { - e.0 - } - } - - // The ResultExt trait defines the `chain_err` method. - - /// Additional methods for `Result`, for easy interaction with this crate. - pub trait $result_ext_name { - /// If the `Result` is an `Err` then `chain_err` evaluates the closure, - /// which returns *some type that can be converted to `ErrorKind`*, boxes - /// the original error to store as the cause, then returns a new error - /// containing the original error. - fn chain_err(self, callback: F) -> ::std::result::Result - where F: FnOnce() -> EK, - EK: Into<$error_kind_name>; - } - - impl $result_ext_name for ::std::result::Result where E: ::std::error::Error + Send + 'static { - fn chain_err(self, callback: F) -> ::std::result::Result - where F: FnOnce() -> EK, - EK: Into<$error_kind_name> { - self.map_err(move |e| { - let state = $crate::State::new::<$error_name>(Box::new(e), ); - $crate::ChainedError::new(callback().into(), state) - }) - } - } - - - }; -} - -/// Internal macro used for reordering of the fields. 
-#[doc(hidden)] -#[macro_export] -macro_rules! error_chain_processing { - ( - ({}, $b:tt, $c:tt, $d:tt) - types $content:tt - $( $tail:tt )* - ) => { - error_chain_processing! { - ($content, $b, $c, $d) - $($tail)* - } - }; - ( - ($a:tt, {}, $c:tt, $d:tt) - links $content:tt - $( $tail:tt )* - ) => { - error_chain_processing! { - ($a, $content, $c, $d) - $($tail)* - } - }; - ( - ($a:tt, $b:tt, {}, $d:tt) - foreign_links $content:tt - $( $tail:tt )* - ) => { - error_chain_processing! { - ($a, $b, $content, $d) - $($tail)* - } - }; - ( - ($a:tt, $b:tt, $c:tt, {}) - errors $content:tt - $( $tail:tt )* - ) => { - error_chain_processing! { - ($a, $b, $c, $content) - $($tail)* - } - }; - ( ($a:tt, $b:tt, $c:tt, $d:tt) ) => { - error_chain_processed! { - types $a - links $b - foreign_links $c - errors $d - } - }; -} - -/// This macro is used for handling of duplicated and out-of-order fields. For -/// the exact rules, see `error_chain_processed`. -#[macro_export] -macro_rules! error_chain { - ( $( $block_name:ident { $( $block_content:tt )* } )* ) => { - error_chain_processing! { - ({}, {}, {}, {}) - $($block_name { $( $block_content )* })* - } - }; -} - -/// Macro used to manage the `backtrace` feature. -/// -/// See -/// https://www.reddit.com/r/rust/comments/57virt/hey_rustaceans_got_an_easy_question_ask_here/da5r4ti/?context=3 -/// for more details. -#[macro_export] -#[doc(hidden)] -#[cfg(feature = "backtrace")] -macro_rules! impl_extract_backtrace { - ($error_name: ident - $error_kind_name: ident - $([$link_error_path: path, $(#[$meta_links: meta])*])*) => { - fn extract_backtrace(e: &(::std::error::Error + Send + 'static)) - -> Option<::std::sync::Arc<$crate::Backtrace>> { - if let Some(e) = e.downcast_ref::<$error_name>() { - return e.1.backtrace.clone(); - } - $( - $( #[$meta_links] )* - { - if let Some(e) = e.downcast_ref::<$link_error_path>() { - return e.1.backtrace.clone(); - } - } - ) * - None - } - } -} - -/// Macro used to manage the `backtrace` feature. 
-/// -/// See -/// https://www.reddit.com/r/rust/comments/57virt/hey_rustaceans_got_an_easy_question_ask_here/da5r4ti/?context=3 -/// for more details. -#[macro_export] -#[doc(hidden)] -#[cfg(not(feature = "backtrace"))] -macro_rules! impl_extract_backtrace { - ($error_name: ident - $error_kind_name: ident - $([$link_error_path: path, $(#[$meta_links: meta])*])*) => {} -} diff -Nru cargo-0.17.0/vendor/error-chain-0.7.2/src/example_generated.rs cargo-0.19.0/vendor/error-chain-0.7.2/src/example_generated.rs --- cargo-0.17.0/vendor/error-chain-0.7.2/src/example_generated.rs 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/error-chain-0.7.2/src/example_generated.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,38 +0,0 @@ -//! This modules show an example of code generated by the macro. IT MUST NOT BE -//! USED OUTSIDE THIS CRATE. -//! -//! This is the basic error structure. You can see that `ErrorKind` -//! has been populated in a variety of ways. All `ErrorKind`s get a -//! `Msg` variant for basic errors. When strings are converted to -//! `ErrorKind`s they become `ErrorKind::Msg`. The "links" defined in -//! the macro are expanded to the `Inner` variant, and the -//! "foreign links" to the `Io` variant. -//! -//! Both types come with a variety of `From` conversions as well: -//! `Error` can be created from `ErrorKind`, `&str` and `String`, -//! and the `links` and `foreign_links` error types. `ErrorKind` -//! can be created from the corresponding `ErrorKind`s of the link -//! types, as well as from `&str` and `String`. -//! -//! `into()` and `From::from` are used heavily to massage types into -//! the right shape. Which one to use in any specific case depends on -//! the influence of type inference, but there are some patterns that -//! arise frequently. - -/// Another code generated by the macro. -pub mod inner { - error_chain! {} -} - -error_chain! 
{ - links { - Inner(inner::Error, inner::ErrorKind) #[doc = "Link to another `ErrorChain`."]; - } - foreign_links { - Io(::std::io::Error) #[doc = "Link to a `std::error::Error` type."]; - } - errors { - #[doc = "A custom error kind."] - Custom - } -} diff -Nru cargo-0.17.0/vendor/error-chain-0.7.2/src/lib.rs cargo-0.19.0/vendor/error-chain-0.7.2/src/lib.rs --- cargo-0.17.0/vendor/error-chain-0.7.2/src/lib.rs 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/error-chain-0.7.2/src/lib.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,581 +0,0 @@ -#![deny(missing_docs)] - -//! A library for consistent and reliable error handling -//! -//! error-chain makes it easy to take full advantage of Rust's -//! powerful error handling features without the overhead of -//! maintaining boilerplate error types and conversions. It implements -//! an opinionated strategy for defining your own error types, as well -//! as conversions from others' error types. -//! -//! ## Quick start -//! -//! If you just want to set up your new project with error-chain, -//! follow the [quickstart.rs] template, and read this [intro] -//! to error-chain. -//! -//! [quickstart.rs]: https://github.com/brson/error-chain/blob/master/examples/quickstart.rs -//! [intro]: http://brson.github.io/2016/11/30/starting-with-error-chain -//! -//! ## Why error chain? -//! -//! * error-chain is easy to configure. Handle errors robustly with minimal -//! effort. -//! * Basic error handling requires no maintenance of custom error types -//! nor the `From` conversions that make `?` work. -//! * error-chain scales from simple error handling strategies to more -//! rigorous. Return formatted strings for simple errors, only -//! introducing error variants and their strong typing as needed for -//! advanced error recovery. -//! * error-chain makes it trivial to correctly manage the [cause] of -//! the errors generated by your own code. This is the "chaining" -//! in "error-chain". -//! -//! 
[cause]: https://doc.rust-lang.org/std/error/trait.Error.html#method.cause -//! -//! ## Principles of error-chain -//! -//! error-chain is based on the following principles: -//! -//! * No error should ever be discarded. This library primarily -//! makes it easy to "chain" errors with the `chain_err` method. -//! * Introducing new errors is trivial. Simple errors can be introduced -//! at the error site with just a string. -//! * Handling errors is possible with pattern matching. -//! * Conversions between error types are done in an automatic and -//! consistent way - `From` conversion behavior is never specified -//! explicitly. -//! * Errors implement Send. -//! * Errors can carry backtraces. -//! -//! Similar to other libraries like [error-type] and [quick-error], -//! this library introduces the error chaining mechanism originally -//! employed by Cargo. The `error_chain!` macro declares the types -//! and implementation boilerplate necessary for fulfilling a -//! particular error-handling strategy. Most importantly it defines a -//! custom error type (called `Error` by convention) and the `From` -//! conversions that let the `try!` macro and `?` operator work. -//! -//! This library differs in a few ways from previous error libs: -//! -//! * Instead of defining the custom `Error` type as an enum, it is a -//! struct containing an `ErrorKind` (which defines the -//! `description` and `display` methods for the error), an opaque, -//! optional, boxed `std::error::Error + Send + 'static` object -//! (which defines the `cause`, and establishes the links in the -//! error chain), and a `Backtrace`. -//! * The macro also defines a `ResultExt` trait that defines a -//! `chain_err` method. This method on all `std::error::Error + Send + 'static` -//! types extends the error chain by boxing the current -//! error into an opaque object and putting it inside a new concrete -//! error. -//! * It provides automatic `From` conversions between other error types -//! 
defined by the `error_chain!` that preserve type information, -//! and facilitate seamless error composition and matching of composed -//! errors. -//! * It provides automatic `From` conversions between any other error -//! type that hides the type of the other error in the `cause` box. -//! * If `RUST_BACKTRACE` is enabled, it collects a single backtrace at -//! the earliest opportunity and propagates it down the stack through -//! `From` and `ResultExt` conversions. -//! -//! To accomplish its goals it makes some tradeoffs: -//! -//! * The split between the `Error` and `ErrorKind` types can make it -//! slightly more cumbersome to instantiate new (unchained) errors, -//! requiring an `Into` or `From` conversion; as well as slightly -//! more cumbersome to match on errors with another layer of types -//! to match. -//! * Because the error type contains `std::error::Error + Send + 'static` objects, -//! it can't implement `PartialEq` for easy comparisons. -//! -//! ## Declaring error types -//! -//! Generally, you define one family of error types per crate, though -//! it's also perfectly fine to define error types on a finer-grained -//! basis, such as per module. -//! -//! Assuming you are using crate-level error types, typically you will -//! define an `errors` module and inside it call `error_chain!`: -//! -//! ``` -//! # #[macro_use] extern crate error_chain; -//! mod other_error { -//! error_chain! {} -//! } -//! -//! error_chain! { -//! // The type defined for this error. These are the conventional -//! // and recommended names, but they can be arbitrarily chosen. -//! // It is also possible to leave this block out entirely, or -//! // leave it empty, and these names will be used automatically. -//! types { -//! Error, ErrorKind, ResultExt, Result; -//! } -//! -//! // Without the `Result` wrapper: -//! // -//! // types { -//! // Error, ErrorKind, ResultExt; -//! // } -//! -//! // Automatic conversions between this error chain and other -//! // error chains. 
In this case, it will e.g. generate an -//! // `ErrorKind` variant called `Dist` which in turn contains -//! // the `rustup_dist::ErrorKind`, with conversions from -//! // `rustup_dist::Error`. -//! // -//! // Optionally, some attributes can be added to a variant. -//! // -//! // This section can be empty. -//! links { -//! Another(other_error::Error, other_error::ErrorKind) #[cfg(unix)]; -//! } -//! -//! // Automatic conversions between this error chain and other -//! // error types not defined by the `error_chain!`. These will be -//! // wrapped in a new error with, in this case, the -//! // `ErrorKind::Temp` variant. The description and cause will -//! // forward to the description and cause of the original error. -//! // -//! // Optionally, some attributes can be added to a variant. -//! // -//! // This section can be empty. -//! foreign_links { -//! Fmt(::std::fmt::Error); -//! Io(::std::io::Error) #[cfg(unix)]; -//! } -//! -//! // Define additional `ErrorKind` variants. The syntax here is -//! // the same as `quick_error!`, but the `from()` and `cause()` -//! // syntax is not supported. -//! errors { -//! InvalidToolchainName(t: String) { -//! description("invalid toolchain name") -//! display("invalid toolchain name: '{}'", t) -//! } -//! } -//! } -//! -//! # fn main() {} -//! ``` -//! -//! Each section, `types`, `links`, `foreign_links`, and `errors` may -//! be omitted if it is empty. -//! -//! This populates the module with a number of definitions, -//! the most important of which are the `Error` type -//! and the `ErrorKind` type. An example of generated code can be found in the -//! [example_generated](example_generated) module. -//! -//! ## Returning new errors -//! -//! Introducing new error chains, with a string message: -//! -//! ``` -//! # #[macro_use] extern crate error_chain; -//! # fn main() {} -//! # error_chain! {} -//! fn foo() -> Result<()> { -//! Err("foo error!".into()) -//! } -//! ``` -//! -//! 
Introducing new error chains, with an `ErrorKind`: -//! -//! ``` -//! # #[macro_use] extern crate error_chain; -//! # fn main() {} -//! error_chain! { -//! errors { FooError } -//! } -//! -//! fn foo() -> Result<()> { -//! Err(ErrorKind::FooError.into()) -//! } -//! ``` -//! -//! Note that the return type is the typedef `Result`, which is -//! defined by the macro as `pub type Result = -//! ::std::result::Result`. Note that in both cases -//! `.into()` is called to convert a type into the `Error` type; both -//! strings and `ErrorKind` have `From` conversions to turn them into -//! `Error`. -//! -//! When the error is emitted inside a `try!` macro or behind the -//! `?` operator, the explicit conversion isn't needed; `try!` will -//! automatically convert `Err(ErrorKind)` to `Err(Error)`. So the -//! below is equivalent to the previous: -//! -//! ``` -//! # #[macro_use] extern crate error_chain; -//! # fn main() {} -//! # error_chain! { errors { FooError } } -//! fn foo() -> Result<()> { -//! Ok(try!(Err(ErrorKind::FooError))) -//! } -//! -//! fn bar() -> Result<()> { -//! Ok(try!(Err("bogus!"))) -//! } -//! ``` -//! -//! ## The `bail!` macro -//! -//! The above method of introducing new errors works but is a little -//! verbose. Instead we can use the `bail!` macro, which, much like `try!` -//! and `?`, performs an early return with conversions. With `bail!` the -//! previous examples look like: -//! -//! ``` -//! # #[macro_use] extern crate error_chain; -//! # fn main() {} -//! # error_chain! { errors { FooError } } -//! fn foo() -> Result<()> { -//! bail!(ErrorKind::FooError); -//! -//! Ok(()) -//! } -//! -//! fn bar() -> Result<()> { -//! bail!("bogus!"); -//! -//! Ok(()) -//! } -//! ``` -//! -//! ## Chaining errors -//! -//! To extend the error chain: -//! -//! ``` -//! # #[macro_use] extern crate error_chain; -//! # fn main() {} -//! # error_chain! {} -//! # fn do_something() -> Result<()> { unimplemented!() } -//! # fn test() -> Result<()> { -//! 
let res: Result<()> = do_something().chain_err(|| "something went wrong"); -//! # Ok(()) -//! # } -//! ``` -//! -//! `chain_err` can be called on any `Result` type where the contained -//! error type implements `std::error::Error + Send + 'static`. If -//! the `Result` is an `Err` then `chain_err` evaluates the closure, -//! which returns *some type that can be converted to `ErrorKind`*, -//! boxes the original error to store as the cause, then returns a new -//! error containing the original error. -//! -//! ## Matching errors -//! -//! error-chain error variants are matched with simple patterns. -//! `Error` is a tuple struct and its first field is the `ErrorKind`, -//! making dispatching on error kinds relatively compact: -//! -//! ``` -//! # #[macro_use] extern crate error_chain; -//! # fn main() { -//! error_chain! { -//! errors { -//! InvalidToolchainName(t: String) { -//! description("invalid toolchain name") -//! display("invalid toolchain name: '{}'", t) -//! } -//! } -//! } -//! -//! match Error::from("error!") { -//! Error(ErrorKind::InvalidToolchainName(_), _) => { } -//! Error(ErrorKind::Msg(_), _) => { } -//! } -//! # } -//! ``` -//! -//! Chained errors are also matched with (relatively) compact syntax -//! -//! ``` -//! # #[macro_use] extern crate error_chain; -//! mod utils { -//! error_chain! { -//! errors { -//! BadStuff { -//! description("bad stuff") -//! } -//! } -//! } -//! } -//! -//! mod app { -//! error_chain! { -//! links { -//! Utils(::utils::Error, ::utils::ErrorKind); -//! } -//! } -//! } -//! -//! -//! # fn main() { -//! match app::Error::from("error!") { -//! app::Error(app::ErrorKind::Utils(utils::ErrorKind::BadStuff), _) => { } -//! _ => { } -//! } -//! # } -//! ``` -//! -//! ## Foreign links -//! -//! Errors that do not conform to the same conventions as this library -//! can still be included in the error chain. They are considered "foreign -//! errors", and are declared using the `foreign_links` block of the -//! 
`error_chain!` macro. `Error`s are automatically created from -//! foreign errors by the `try!` macro. -//! -//! Foreign links and regular links have one crucial difference: -//! `From` conversions for regular links *do not introduce a new error -//! into the error chain*, while conversions for foreign links *always -//! introduce a new error into the error chain*. So for the example -//! above all errors deriving from the `temp::Error` type will be -//! presented to the user as a new `ErrorKind::Temp` variant, and the -//! cause will be the original `temp::Error` error. In contrast, when -//! `rustup_utils::Error` is converted to `Error` the two `ErrorKind`s -//! are converted between each other to create a new `Error` but the -//! old error is discarded; there is no "cause" created from the -//! original error. -//! -//! ## Backtraces -//! -//! If the `RUST_BACKTRACE` environment variable is set to anything -//! but ``0``, the earliest non-foreign error to be generated creates -//! a single backtrace, which is passed through all `From` conversions -//! and `chain_err` invocations of compatible types. To read the -//! backtrace just call the `backtrace()` method. -//! -//! Backtrace generation can be disabled by turning off the `backtrace` feature. -//! -//! ## Iteration -//! -//! The `iter` method returns an iterator over the chain of error boxes. -//! -//! [error-type]: https://github.com/DanielKeep/rust-error-type -//! [quick-error]: https://github.com/tailhook/quick-error - - -#[cfg(feature = "backtrace")] -extern crate backtrace; - -use std::error; -use std::iter::Iterator; -#[cfg(feature = "backtrace")] -use std::sync::Arc; - -#[cfg(feature = "backtrace")] -pub use backtrace::Backtrace; -#[cfg(not(feature = "backtrace"))] -/// Dummy type used when the `backtrace` feature is disabled. 
-pub type Backtrace = (); - -#[macro_use] -mod quick_error; -#[macro_use] -mod error_chain; -#[macro_use] -mod quick_main; -pub use quick_main::ExitCode; -#[cfg(feature = "example_generated")] -pub mod example_generated; - -/// Iterator over the error chain using the `Error::cause()` method. -pub struct ErrorChainIter<'a>(pub Option<&'a error::Error>); - -impl<'a> Iterator for ErrorChainIter<'a> { - type Item = &'a error::Error; - - fn next<'b>(&'b mut self) -> Option<&'a error::Error> { - match self.0.take() { - Some(e) => { - self.0 = e.cause(); - Some(e) - } - None => None, - } - } -} - -/// Returns a backtrace of the current call stack if `RUST_BACKTRACE` -/// is set to anything but ``0``, and `None` otherwise. This is used -/// in the generated error implementations. -#[cfg(feature = "backtrace")] -#[doc(hidden)] -pub fn make_backtrace() -> Option> { - match std::env::var_os("RUST_BACKTRACE") { - Some(ref val) if val != "0" => Some(Arc::new(Backtrace::new())), - _ => None, - } -} - -/// This trait is implemented on all the errors generated by the `error_chain` -/// macro. -pub trait ChainedError: error::Error + Send + 'static { - /// Associated kind type. - type ErrorKind; - - /// Constructs an error from a kind, and generates a backtrace. - fn from_kind(kind: Self::ErrorKind) -> Self where Self: Sized; - - /// Returns the kind of the error. - fn kind(&self) -> &Self::ErrorKind; - - /// Iterates over the error chain. - fn iter(&self) -> ErrorChainIter; - - /// Returns the backtrace associated with this error. - fn backtrace(&self) -> Option<&Backtrace>; - - /// Creates an error from its parts. - #[doc(hidden)] - fn new(kind: Self::ErrorKind, state: State) -> Self where Self: Sized; - - /// Returns the first known backtrace, either from its State or from one - /// of the errors from `foreign_links`. 
- #[cfg(feature = "backtrace")] - #[doc(hidden)] - fn extract_backtrace(e: &(error::Error + Send + 'static)) -> Option> - where Self: Sized; -} - -/// Common state between errors. -#[derive(Debug)] -#[doc(hidden)] -pub struct State { - /// Next error in the error chain. - pub next_error: Option>, - /// Backtrace for the current error. - #[cfg(feature = "backtrace")] - pub backtrace: Option>, -} - -impl Default for State { - #[cfg(feature = "backtrace")] - fn default() -> State { - State { - next_error: None, - backtrace: make_backtrace(), - } - } - - #[cfg(not(feature = "backtrace"))] - fn default() -> State { - State { next_error: None } - } -} - -impl State { - /// Creates a new State type - #[cfg(feature = "backtrace")] - pub fn new(e: Box) -> State { - let backtrace = CE::extract_backtrace(&*e).or_else(make_backtrace); - State { - next_error: Some(e), - backtrace: backtrace, - } - } - - /// Creates a new State type - #[cfg(not(feature = "backtrace"))] - pub fn new(e: Box) -> State { - State { next_error: Some(e) } - } - - /// Returns the inner backtrace if present. - #[cfg(feature = "backtrace")] - pub fn backtrace(&self) -> Option<&Backtrace> { - self.backtrace.as_ref().map(|v| &**v) - } - - /// Returns the inner backtrace if present. - #[cfg(not(feature = "backtrace"))] - pub fn backtrace(&self) -> Option<&Backtrace> { - None - } -} - -/// Exits a function early with an error -/// -/// The `bail!` macro provides an easy way to exit a function. -/// `bail!(expr)` is equivalent to writing. -/// -/// ``` -/// # #[macro_use] extern crate error_chain; -/// # error_chain! { } -/// # fn main() { } -/// # fn foo() -> Result<()> { -/// # let expr = ""; -/// return Err(expr.into()); -/// # } -/// ``` -/// -/// And as shorthand it takes a formatting string a la `println!`: -/// -/// ``` -/// # #[macro_use] extern crate error_chain; -/// # error_chain! 
{ } -/// # fn main() { } -/// # fn foo() -> Result<()> { -/// # let n = 0; -/// bail!("bad number: {}", n); -/// # } -/// ``` -/// -/// # Examples -/// -/// Bailing on a custom error: -/// -/// ``` -/// # #[macro_use] extern crate error_chain; -/// # fn main() {} -/// error_chain! { -/// errors { FooError } -/// } -/// -/// fn foo() -> Result<()> { -/// if bad_condition() { -/// bail!(ErrorKind::FooError); -/// } -/// -/// Ok(()) -/// } -/// -/// # fn bad_condition() -> bool { true } -/// ``` -/// -/// Bailing on a formatted string: -/// -/// ``` -/// # #[macro_use] extern crate error_chain; -/// # fn main() {} -/// error_chain! { } -/// -/// fn foo() -> Result<()> { -/// if let Some(bad_num) = bad_condition() { -/// bail!("so bad: {}", bad_num); -/// } -/// -/// Ok(()) -/// } -/// -/// # fn bad_condition() -> Option { None } -/// ``` -#[macro_export] -macro_rules! bail { - ($e:expr) => { - return Err($e.into()); - }; - ($fmt:expr, $($arg:tt)+) => { - return Err(format!($fmt, $($arg)+).into()); - }; -} - -#[doc(hidden)] -pub mod mock { - error_chain!{} -} diff -Nru cargo-0.17.0/vendor/error-chain-0.7.2/src/quick_error.rs cargo-0.19.0/vendor/error-chain-0.7.2/src/quick_error.rs --- cargo-0.17.0/vendor/error-chain-0.7.2/src/quick_error.rs 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/error-chain-0.7.2/src/quick_error.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,529 +0,0 @@ -// From https://github.com/tailhook/quick-error -// Changes: -// - replace `impl Error` by `impl Item::description` -// - $imeta - -#[macro_export] -macro_rules! 
quick_error { - ( $(#[$meta:meta])* - pub enum $name:ident { $($chunks:tt)* } - ) => { - quick_error!(SORT [pub enum $name $(#[$meta])* ] - items [] buf [] - queue [ $($chunks)* ]); - }; - ( $(#[$meta:meta])* - enum $name:ident { $($chunks:tt)* } - ) => { - quick_error!(SORT [enum $name $(#[$meta])* ] - items [] buf [] - queue [ $($chunks)* ]); - }; - // Queue is empty, can do the work - (SORT [enum $name:ident $( #[$meta:meta] )*] - items [$($( #[$imeta:meta] )* - => $iitem:ident: $imode:tt [$( $ivar:ident: $ityp:ty ),*] - {$( $ifuncs:tt )*} )* ] - buf [ ] - queue [ ] - ) => { - quick_error!(ENUM_DEFINITION [enum $name $( #[$meta] )*] - body [] - queue [$($( #[$imeta] )* - => $iitem: $imode [$( $ivar: $ityp ),*] )*] - ); - quick_error!(IMPLEMENTATIONS $name {$( - $iitem: $imode [$(#[$imeta])*] [$( $ivar: $ityp ),*] {$( $ifuncs )*} - )*}); - $( - quick_error!(ERROR_CHECK $imode $($ifuncs)*); - )* - }; - (SORT [pub enum $name:ident $( #[$meta:meta] )*] - items [$($( #[$imeta:meta] )* - => $iitem:ident: $imode:tt [$( $ivar:ident: $ityp:ty ),*] - {$( $ifuncs:tt )*} )* ] - buf [ ] - queue [ ] - ) => { - quick_error!(ENUM_DEFINITION [pub enum $name $( #[$meta] )*] - body [] - queue [$($( #[$imeta] )* - => $iitem: $imode [$( $ivar: $ityp ),*] )*] - ); - quick_error!(IMPLEMENTATIONS $name {$( - $iitem: $imode [$(#[$imeta])*] [$( $ivar: $ityp ),*] {$( $ifuncs )*} - )*}); - $( - quick_error!(ERROR_CHECK $imode $($ifuncs)*); - )* - }; - // Add meta to buffer - (SORT [$( $def:tt )*] - items [$($( #[$imeta:meta] )* - => $iitem:ident: $imode:tt [$( $ivar:ident: $ityp:ty ),*] - {$( $ifuncs:tt )*} )* ] - buf [$( #[$bmeta:meta] )*] - queue [ #[$qmeta:meta] $( $tail:tt )*] - ) => { - quick_error!(SORT [$( $def )*] - items [$( $(#[$imeta])* => $iitem: $imode [$( $ivar:$ityp ),*] {$( $ifuncs )*} )*] - buf [$( #[$bmeta] )* #[$qmeta] ] - queue [$( $tail )*]); - }; - // Add ident to buffer - (SORT [$( $def:tt )*] - items [$($( #[$imeta:meta] )* - => $iitem:ident: $imode:tt [$( 
$ivar:ident: $ityp:ty ),*] - {$( $ifuncs:tt )*} )* ] - buf [$( #[$bmeta:meta] )*] - queue [ $qitem:ident $( $tail:tt )*] - ) => { - quick_error!(SORT [$( $def )*] - items [$( $(#[$imeta])* - => $iitem: $imode [$( $ivar:$ityp ),*] {$( $ifuncs )*} )*] - buf [$(#[$bmeta])* => $qitem : UNIT [ ] ] - queue [$( $tail )*]); - }; - // Flush buffer on meta after ident - (SORT [$( $def:tt )*] - items [$($( #[$imeta:meta] )* - => $iitem:ident: $imode:tt [$( $ivar:ident: $ityp:ty ),*] - {$( $ifuncs:tt )*} )* ] - buf [$( #[$bmeta:meta] )* - => $bitem:ident: $bmode:tt [$( $bvar:ident: $btyp:ty ),*] ] - queue [ #[$qmeta:meta] $( $tail:tt )*] - ) => { - quick_error!(SORT [$( $def )*] - enum [$( $(#[$emeta])* => $eitem $(( $($etyp),* ))* )* - $(#[$bmeta])* => $bitem: $bmode $(( $($btyp),* ))*] - items [$($( #[$imeta:meta] )* - => $iitem: $imode [$( $ivar:$ityp ),*] {$( $ifuncs )*} )* - $bitem: $bmode [$( $bvar:$btyp ),*] {} ] - buf [ #[$qmeta] ] - queue [$( $tail )*]); - }; - // Add tuple enum-variant - (SORT [$( $def:tt )*] - items [$($( #[$imeta:meta] )* - => $iitem:ident: $imode:tt [$( $ivar:ident: $ityp:ty ),*] - {$( $ifuncs:tt )*} )* ] - buf [$( #[$bmeta:meta] )* => $bitem:ident: UNIT [ ] ] - queue [($( $qvar:ident: $qtyp:ty ),+) $( $tail:tt )*] - ) => { - quick_error!(SORT [$( $def )*] - items [$( $(#[$imeta])* => $iitem: $imode [$( $ivar:$ityp ),*] {$( $ifuncs )*} )*] - buf [$( #[$bmeta] )* => $bitem: TUPLE [$( $qvar:$qtyp ),*] ] - queue [$( $tail )*] - ); - }; - // Add struct enum-variant - e.g. 
{ descr: &'static str } - (SORT [$( $def:tt )*] - items [$($( #[$imeta:meta] )* - => $iitem:ident: $imode:tt [$( $ivar:ident: $ityp:ty ),*] - {$( $ifuncs:tt )*} )* ] - buf [$( #[$bmeta:meta] )* => $bitem:ident: UNIT [ ] ] - queue [{ $( $qvar:ident: $qtyp:ty ),+} $( $tail:tt )*] - ) => { - quick_error!(SORT [$( $def )*] - items [$( $(#[$imeta])* => $iitem: $imode [$( $ivar:$ityp ),*] {$( $ifuncs )*} )*] - buf [$( #[$bmeta] )* => $bitem: STRUCT [$( $qvar:$qtyp ),*] ] - queue [$( $tail )*]); - }; - // Add struct enum-variant, with excess comma - e.g. { descr: &'static str, } - (SORT [$( $def:tt )*] - items [$($( #[$imeta:meta] )* - => $iitem:ident: $imode:tt [$( $ivar:ident: $ityp:ty ),*] - {$( $ifuncs:tt )*} )* ] - buf [$( #[$bmeta:meta] )* => $bitem:ident: UNIT [ ] ] - queue [{$( $qvar:ident: $qtyp:ty ),+ ,} $( $tail:tt )*] - ) => { - quick_error!(SORT [$( $def )*] - items [$( $(#[$imeta])* => $iitem: $imode [$( $ivar:$ityp ),*] {$( $ifuncs )*} )*] - buf [$( #[$bmeta] )* => $bitem: STRUCT [$( $qvar:$qtyp ),*] ] - queue [$( $tail )*]); - }; - // Add braces and flush always on braces - (SORT [$( $def:tt )*] - items [$($( #[$imeta:meta] )* - => $iitem:ident: $imode:tt [$( $ivar:ident: $ityp:ty ),*] - {$( $ifuncs:tt )*} )* ] - buf [$( #[$bmeta:meta] )* - => $bitem:ident: $bmode:tt [$( $bvar:ident: $btyp:ty ),*] ] - queue [ {$( $qfuncs:tt )*} $( $tail:tt )*] - ) => { - quick_error!(SORT [$( $def )*] - items [$( $(#[$imeta])* => $iitem: $imode [$( $ivar:$ityp ),*] {$( $ifuncs )*} )* - $(#[$bmeta])* => $bitem: $bmode [$( $bvar:$btyp ),*] {$( $qfuncs )*} ] - buf [ ] - queue [$( $tail )*]); - }; - // Flush buffer on double ident - (SORT [$( $def:tt )*] - items [$($( #[$imeta:meta] )* - => $iitem:ident: $imode:tt [$( $ivar:ident: $ityp:ty ),*] - {$( $ifuncs:tt )*} )* ] - buf [$( #[$bmeta:meta] )* - => $bitem:ident: $bmode:tt [$( $bvar:ident: $btyp:ty ),*] ] - queue [ $qitem:ident $( $tail:tt )*] - ) => { - quick_error!(SORT [$( $def )*] - items [$( $(#[$imeta])* => $iitem: 
$imode [$( $ivar:$ityp ),*] {$( $ifuncs )*} )* - $(#[$bmeta])* => $bitem: $bmode [$( $bvar:$btyp ),*] {} ] - buf [ => $qitem : UNIT [ ] ] - queue [$( $tail )*]); - }; - // Flush buffer on end - (SORT [$( $def:tt )*] - items [$($( #[$imeta:meta] )* - => $iitem:ident: $imode:tt [$( $ivar:ident: $ityp:ty ),*] - {$( $ifuncs:tt )*} )* ] - buf [$( #[$bmeta:meta] )* - => $bitem:ident: $bmode:tt [$( $bvar:ident: $btyp:ty ),*] ] - queue [ ] - ) => { - quick_error!(SORT [$( $def )*] - items [$( $(#[$imeta])* => $iitem: $imode [$( $ivar:$ityp ),*] {$( $ifuncs )*} )* - $(#[$bmeta])* => $bitem: $bmode [$( $bvar:$btyp ),*] {} ] - buf [ ] - queue [ ]); - }; - // Public enum (Queue Empty) - (ENUM_DEFINITION [pub enum $name:ident $( #[$meta:meta] )*] - body [$($( #[$imeta:meta] )* - => $iitem:ident ($(($( $ttyp:ty ),+))*) {$({$( $svar:ident: $styp:ty ),*})*} )* ] - queue [ ] - ) => { - $(#[$meta])* - pub enum $name { - $( - $(#[$imeta])* - $iitem $(($( $ttyp ),*))* $({$( $svar: $styp ),*})*, - )* - } - }; - // Private enum (Queue Empty) - (ENUM_DEFINITION [enum $name:ident $( #[$meta:meta] )*] - body [$($( #[$imeta:meta] )* - => $iitem:ident ($(($( $ttyp:ty ),+))*) {$({$( $svar:ident: $styp:ty ),*})*} )* ] - queue [ ] - ) => { - $(#[$meta])* - enum $name { - $( - $(#[$imeta])* - $iitem $(($( $ttyp ),*))* $({$( $svar: $styp ),*})*, - )* - } - }; - // Unit variant - (ENUM_DEFINITION [$( $def:tt )*] - body [$($( #[$imeta:meta] )* - => $iitem:ident ($(($( $ttyp:ty ),+))*) {$({$( $svar:ident: $styp:ty ),*})*} )* ] - queue [$( #[$qmeta:meta] )* - => $qitem:ident: UNIT [ ] $( $queue:tt )*] - ) => { - quick_error!(ENUM_DEFINITION [ $($def)* ] - body [$($( #[$imeta] )* => $iitem ($(($( $ttyp ),+))*) {$({$( $svar: $styp ),*})*} )* - $( #[$qmeta] )* => $qitem () {} ] - queue [ $($queue)* ] - ); - }; - // Tuple variant - (ENUM_DEFINITION [$( $def:tt )*] - body [$($( #[$imeta:meta] )* - => $iitem:ident ($(($( $ttyp:ty ),+))*) {$({$( $svar:ident: $styp:ty ),*})*} )* ] - queue [$( #[$qmeta:meta] 
)* - => $qitem:ident: TUPLE [$( $qvar:ident: $qtyp:ty ),+] $( $queue:tt )*] - ) => { - quick_error!(ENUM_DEFINITION [ $($def)* ] - body [$($( #[$imeta] )* => $iitem ($(($( $ttyp ),+))*) {$({$( $svar: $styp ),*})*} )* - $( #[$qmeta] )* => $qitem (($( $qtyp ),*)) {} ] - queue [ $($queue)* ] - ); - }; - // Struct variant - (ENUM_DEFINITION [$( $def:tt )*] - body [$($( #[$imeta:meta] )* - => $iitem:ident ($(($( $ttyp:ty ),+))*) {$({$( $svar:ident: $styp:ty ),*})*} )* ] - queue [$( #[$qmeta:meta] )* - => $qitem:ident: STRUCT [$( $qvar:ident: $qtyp:ty ),*] $( $queue:tt )*] - ) => { - quick_error!(ENUM_DEFINITION [ $($def)* ] - body [$($( #[$imeta] )* => $iitem ($(($( $ttyp ),+))*) {$({$( $svar: $styp ),*})*} )* - $( #[$qmeta] )* => $qitem () {{$( $qvar: $qtyp ),*}} ] - queue [ $($queue)* ] - ); - }; - (IMPLEMENTATIONS - $name:ident {$( - $item:ident: $imode:tt [$(#[$imeta:meta])*] [$( $var:ident: $typ:ty ),*] {$( $funcs:tt )*} - )*} - ) => { - #[allow(unused)] - impl ::std::fmt::Display for $name { - fn fmt(&self, fmt: &mut ::std::fmt::Formatter) - -> ::std::fmt::Result - { - match *self { - $( - $(#[$imeta])* - quick_error!(ITEM_PATTERN - $name $item: $imode [$( ref $var ),*] - ) => { - let display_fn = quick_error!(FIND_DISPLAY_IMPL - $name $item: $imode - {$( $funcs )*}); - - display_fn(self, fmt) - } - )* - } - } - } - /*#[allow(unused)] - impl ::std::error::Error for $name { - fn description(&self) -> &str { - match *self { - $( - quick_error!(ITEM_PATTERN - $name $item: $imode [$( ref $var ),*] - ) => { - quick_error!(FIND_DESCRIPTION_IMPL - $item: $imode self fmt [$( $var ),*] - {$( $funcs )*}) - } - )* - } - } - fn cause(&self) -> Option<&::std::error::Error> { - match *self { - $( - quick_error!(ITEM_PATTERN - $name $item: $imode [$( ref $var ),*] - ) => { - quick_error!(FIND_CAUSE_IMPL - $item: $imode [$( $var ),*] - {$( $funcs )*}) - } - )* - } - } - }*/ - #[allow(unused)] - impl $name { - /// A string describing the error kind. 
- pub fn description(&self) -> &str { - match *self { - $( - $(#[$imeta])* - quick_error!(ITEM_PATTERN - $name $item: $imode [$( ref $var ),*] - ) => { - quick_error!(FIND_DESCRIPTION_IMPL - $item: $imode self fmt [$( $var ),*] - {$( $funcs )*}) - } - )* - } - } - } - $( - quick_error!(FIND_FROM_IMPL - $name $item: $imode [$( $var:$typ ),*] - {$( $funcs )*}); - )* - }; - (FIND_DISPLAY_IMPL $name:ident $item:ident: $imode:tt - { display($self_:tt) -> ($( $exprs:tt )*) $( $tail:tt )*} - ) => { - |quick_error!(IDENT $self_): &$name, f: &mut ::std::fmt::Formatter| { - write!(f, $( $exprs )*) - } - }; - (FIND_DISPLAY_IMPL $name:ident $item:ident: $imode:tt - { display($pattern:expr) $( $tail:tt )*} - ) => { - |_, f: &mut ::std::fmt::Formatter| { write!(f, $pattern) } - }; - (FIND_DISPLAY_IMPL $name:ident $item:ident: $imode:tt - { display($pattern:expr, $( $exprs:tt )*) $( $tail:tt )*} - ) => { - |_, f: &mut ::std::fmt::Formatter| { write!(f, $pattern, $( $exprs )*) } - }; - (FIND_DISPLAY_IMPL $name:ident $item:ident: $imode:tt - { $t:tt $( $tail:tt )*} - ) => { - quick_error!(FIND_DISPLAY_IMPL - $name $item: $imode - {$( $tail )*}) - }; - (FIND_DISPLAY_IMPL $name:ident $item:ident: $imode:tt - { } - ) => { - |self_: &$name, f: &mut ::std::fmt::Formatter| { - write!(f, "{}", self_.description()) - } - }; - (FIND_DESCRIPTION_IMPL $item:ident: $imode:tt $me:ident $fmt:ident - [$( $var:ident ),*] - { description($expr:expr) $( $tail:tt )*} - ) => { - $expr - }; - (FIND_DESCRIPTION_IMPL $item:ident: $imode:tt $me:ident $fmt:ident - [$( $var:ident ),*] - { $t:tt $( $tail:tt )*} - ) => { - quick_error!(FIND_DESCRIPTION_IMPL - $item: $imode $me $fmt [$( $var ),*] - {$( $tail )*}) - }; - (FIND_DESCRIPTION_IMPL $item:ident: $imode:tt $me:ident $fmt:ident - [$( $var:ident ),*] - { } - ) => { - stringify!($item) - }; - (FIND_CAUSE_IMPL $item:ident: $imode:tt - [$( $var:ident ),*] - { cause($expr:expr) $( $tail:tt )*} - ) => { - Some($expr) - }; - (FIND_CAUSE_IMPL $item:ident: 
$imode:tt - [$( $var:ident ),*] - { $t:tt $( $tail:tt )*} - ) => { - quick_error!(FIND_CAUSE_IMPL - $item: $imode [$( $var ),*] - { $($tail)* }) - }; - (FIND_CAUSE_IMPL $item:ident: $imode:tt - [$( $var:ident ),*] - { } - ) => { - None - }; - (FIND_FROM_IMPL $name:ident $item:ident: $imode:tt - [$( $var:ident: $typ:ty ),*] - { from() $( $tail:tt )*} - ) => { - $( - impl From<$typ> for $name { - fn from($var: $typ) -> $name { - $name::$item($var) - } - } - )* - quick_error!(FIND_FROM_IMPL - $name $item: $imode [$( $var:$typ ),*] - {$( $tail )*}); - }; - (FIND_FROM_IMPL $name:ident $item:ident: UNIT - [ ] - { from($ftyp:ty) $( $tail:tt )*} - ) => { - impl From<$ftyp> for $name { - fn from(_discarded_error: $ftyp) -> $name { - $name::$item - } - } - quick_error!(FIND_FROM_IMPL - $name $item: UNIT [ ] - {$( $tail )*}); - }; - (FIND_FROM_IMPL $name:ident $item:ident: TUPLE - [$( $var:ident: $typ:ty ),*] - { from($fvar:ident: $ftyp:ty) -> ($( $texpr:expr ),*) $( $tail:tt )*} - ) => { - impl From<$ftyp> for $name { - fn from($fvar: $ftyp) -> $name { - $name::$item($( $texpr ),*) - } - } - quick_error!(FIND_FROM_IMPL - $name $item: TUPLE [$( $var:$typ ),*] - { $($tail)* }); - }; - (FIND_FROM_IMPL $name:ident $item:ident: STRUCT - [$( $var:ident: $typ:ty ),*] - { from($fvar:ident: $ftyp:ty) -> {$( $tvar:ident: $texpr:expr ),*} $( $tail:tt )*} - ) => { - impl From<$ftyp> for $name { - fn from($fvar: $ftyp) -> $name { - $name::$item { - $( $tvar: $texpr ),* - } - } - } - quick_error!(FIND_FROM_IMPL - $name $item: STRUCT [$( $var:$typ ),*] - { $($tail)* }); - }; - (FIND_FROM_IMPL $name:ident $item:ident: $imode:tt - [$( $var:ident: $typ:ty ),*] - { $t:tt $( $tail:tt )*} - ) => { - quick_error!(FIND_FROM_IMPL - $name $item: $imode [$( $var:$typ ),*] - {$( $tail )*} - ); - }; - (FIND_FROM_IMPL $name:ident $item:ident: $imode:tt - [$( $var:ident: $typ:ty ),*] - { } - ) => { - }; - (ITEM_BODY $(#[$imeta:meta])* $item:ident: UNIT - ) => { }; - (ITEM_BODY $(#[$imeta:meta])* 
$item:ident: TUPLE - [$( $typ:ty ),*] - ) => { - ($( $typ ),*) - }; - (ITEM_BODY $(#[$imeta:meta])* $item:ident: STRUCT - [$( $var:ident: $typ:ty ),*] - ) => { - {$( $var:$typ ),*} - }; - (ITEM_PATTERN $name:ident $item:ident: UNIT [] - ) => { - $name::$item - }; - (ITEM_PATTERN $name:ident $item:ident: TUPLE - [$( ref $var:ident ),*] - ) => { - $name::$item ($( ref $var ),*) - }; - (ITEM_PATTERN $name:ident $item:ident: STRUCT - [$( ref $var:ident ),*] - ) => { - $name::$item {$( ref $var ),*} - }; - // This one should match all allowed sequences in "funcs" but not match - // anything else. - // This is to contrast FIND_* clauses which just find stuff they need and - // skip everything else completely - (ERROR_CHECK $imode:tt display($self_:tt) -> ($( $exprs:tt )*) $( $tail:tt )*) - => { quick_error!(ERROR_CHECK $imode $($tail)*); }; - (ERROR_CHECK $imode:tt display($pattern: expr) $( $tail:tt )*) - => { quick_error!(ERROR_CHECK $imode $($tail)*); }; - (ERROR_CHECK $imode:tt display($pattern: expr, $( $exprs:tt )*) $( $tail:tt )*) - => { quick_error!(ERROR_CHECK $imode $($tail)*); }; - (ERROR_CHECK $imode:tt description($expr:expr) $( $tail:tt )*) - => { quick_error!(ERROR_CHECK $imode $($tail)*); }; - (ERROR_CHECK $imode:tt cause($expr:expr) $($tail:tt)*) - => { quick_error!(ERROR_CHECK $imode $($tail)*); }; - (ERROR_CHECK $imode:tt from() $($tail:tt)*) - => { quick_error!(ERROR_CHECK $imode $($tail)*); }; - (ERROR_CHECK $imode:tt from($ftyp:ty) $($tail:tt)*) - => { quick_error!(ERROR_CHECK $imode $($tail)*); }; - (ERROR_CHECK TUPLE from($fvar:ident: $ftyp:ty) -> ($( $e:expr ),*) $( $tail:tt )*) - => { quick_error!(ERROR_CHECK TUPLE $($tail)*); }; - (ERROR_CHECK STRUCT from($fvar:ident: $ftyp:ty) -> {$( $v:ident: $e:expr ),*} $( $tail:tt )*) - => { quick_error!(ERROR_CHECK STRUCT $($tail)*); }; - (ERROR_CHECK $imode:tt ) => {}; - // Utility functions - (IDENT $ident:ident) => { $ident } -} diff -Nru cargo-0.17.0/vendor/error-chain-0.7.2/src/quick_main.rs 
cargo-0.19.0/vendor/error-chain-0.7.2/src/quick_main.rs --- cargo-0.17.0/vendor/error-chain-0.7.2/src/quick_main.rs 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/error-chain-0.7.2/src/quick_main.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,77 +0,0 @@ -/// Convenient wrapper to be able to use `try!` and such in the main. You can -/// use it with a separated function: -/// -/// ```ignore -/// # #[macro_use] extern crate error_chain; -/// # error_chain! {} -/// quick_main!(run); -/// -/// fn run() -> Result<()> { -/// Err("error".into()) -/// } -/// ``` -/// -/// or with a closure: -/// -/// ```ignore -/// # #[macro_use] extern crate error_chain; -/// # error_chain! {} -/// quick_main!(|| -> Result<()> { -/// Err("error".into()) -/// }); -/// ``` -/// -/// You can also set the exit value of the process by returning a type that implements [`ExitCode`](trait.ExitCode.html): -/// -/// ```ignore -/// # #[macro_use] extern crate error_chain; -/// # error_chain! {} -/// quick_main!(run); -/// -/// fn run() -> Result { -/// Err("error".into()) -/// } -/// ``` -#[macro_export] -macro_rules! quick_main { - ($main:expr) => { - fn main() { - use ::std::io::Write; - let stderr = &mut ::std::io::stderr(); - let errmsg = "Error writing to stderr"; - - ::std::process::exit(match $main() { - Ok(ret) => $crate::ExitCode::code(ret), - Err(ref e) => { - let e: &$crate::ChainedError = e; - writeln!(stderr, "Error: {}", e).expect(errmsg); - - for e in e.iter().skip(1) { - writeln!(stderr, "Caused by: {}", e).expect(errmsg); - } - - if let Some(backtrace) = e.backtrace() { - writeln!(stderr, "{:?}", backtrace).expect(errmsg); - } - - 1 - } - }); - } - }; -} - -/// Represents a value that can be used as the exit status of the process. -/// See [`quick_main!`](macro.quick_main.html). -pub trait ExitCode { - /// Returns the value to use as the exit status. 
- fn code(self) -> i32; -} - -impl ExitCode for i32 { - fn code(self) -> i32 { self } -} - -impl ExitCode for () { - fn code(self) -> i32 { 0 } -} diff -Nru cargo-0.17.0/vendor/error-chain-0.7.2/tests/quick_main.rs cargo-0.19.0/vendor/error-chain-0.7.2/tests/quick_main.rs --- cargo-0.17.0/vendor/error-chain-0.7.2/tests/quick_main.rs 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/error-chain-0.7.2/tests/quick_main.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,30 +0,0 @@ -#![allow(dead_code)] -#[macro_use] -extern crate error_chain; - -error_chain!(); - -mod unit { - use super::*; - quick_main!(run); - - fn run() -> Result<()> { - Ok(()) - } -} - -mod i32 { - use super::*; - quick_main!(run); - - fn run() -> Result { - Ok(1) - } -} - -mod closure { - use super::*; - quick_main!(|| -> Result<()> { - Ok(()) - }); -} diff -Nru cargo-0.17.0/vendor/error-chain-0.7.2/tests/tests.rs cargo-0.19.0/vendor/error-chain-0.7.2/tests/tests.rs --- cargo-0.17.0/vendor/error-chain-0.7.2/tests/tests.rs 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/error-chain-0.7.2/tests/tests.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,543 +0,0 @@ -#![allow(dead_code)] -//#![feature(trace_macros)] -// -//trace_macros!(true); - -#[macro_use] -extern crate error_chain; - -#[test] -fn smoke_test_1() { - error_chain! { - types { - Error, ErrorKind, ResultExt, Result; - } - - links { } - - foreign_links { } - - errors { } - }; -} - -#[test] -fn smoke_test_2() { - error_chain! { - types { } - - links { } - - foreign_links { } - - errors { } - }; -} - -#[test] -fn smoke_test_3() { - error_chain! { - links { } - - foreign_links { } - - errors { } - }; -} - -#[test] -fn smoke_test_4() { - error_chain! { - links { } - - foreign_links { } - - errors { - HttpStatus(e: u32) { - description("http request returned an unsuccessful status code") - display("http request returned an unsuccessful status code: {}", e) - } - } - }; -} - -#[test] -fn smoke_test_5() { - error_chain! 
{ - types { } - - links { } - - foreign_links { } - - errors { - HttpStatus(e: u32) { - description("http request returned an unsuccessful status code") - display("http request returned an unsuccessful status code: {}", e) - } - } - }; -} - -#[test] -fn smoke_test_6() { - error_chain! { - errors { - HttpStatus(e: u32) { - description("http request returned an unsuccessful status code") - display("http request returned an unsuccessful status code: {}", e) - } - } - }; -} - -#[test] -fn smoke_test_7() { - error_chain! { - types { } - - foreign_links { } - - errors { - HttpStatus(e: u32) { - description("http request returned an unsuccessful status code") - display("http request returned an unsuccessful status code: {}", e) - } - } - }; -} - -#[test] -fn smoke_test_8() { - error_chain! { - types { } - - links { } - links { } - - foreign_links { } - foreign_links { } - - errors { - FileNotFound - AccessDenied - } - }; -} - -#[test] -fn order_test_1() { - error_chain! { types { } links { } foreign_links { } errors { } }; -} - -#[test] -fn order_test_2() { - error_chain! { links { } types { } foreign_links { } errors { } }; -} - -#[test] -fn order_test_3() { - error_chain! { foreign_links { } links { } errors { } types { } }; -} - -#[test] -fn order_test_4() { - error_chain! { errors { } types { } foreign_links { } }; -} - -#[test] -fn order_test_5() { - error_chain! { foreign_links { } types { } }; -} - -#[test] -fn order_test_6() { - error_chain! { - links { } - - errors { - HttpStatus(e: u32) { - description("http request returned an unsuccessful status code") - display("http request returned an unsuccessful status code: {}", e) - } - } - - - foreign_links { } - }; -} - -#[test] -fn order_test_7() { - error_chain! { - links { } - - foreign_links { } - - types { - Error, ErrorKind, ResultExt, Result; - } - }; -} - - -#[test] -fn order_test_8() { - error_chain! 
{ - links { } - - foreign_links { } - foreign_links { } - - types { - Error, ErrorKind, ResultExt, Result; - } - }; -} - -#[test] -fn empty() { - error_chain! { }; -} - -#[test] -#[cfg(feature = "backtrace")] -fn has_backtrace_depending_on_env() { - use std::env; - - error_chain! { - types {} - links {} - foreign_links {} - errors { - MyError - } - } - - let original_value = env::var_os("RUST_BACKTRACE"); - - // missing RUST_BACKTRACE and RUST_BACKTRACE=0 - env::remove_var("RUST_BACKTRACE"); - let err = Error::from(ErrorKind::MyError); - assert!(err.backtrace().is_none()); - env::set_var("RUST_BACKTRACE", "0"); - let err = Error::from(ErrorKind::MyError); - assert!(err.backtrace().is_none()); - - // RUST_BACKTRACE set to anything but 0 - env::set_var("RUST_BACKTRACE", "yes"); - let err = Error::from(ErrorKind::MyError); - assert!(err.backtrace().is_some()); - - if let Some(var) = original_value { - env::set_var("RUST_BACKTRACE", var); - } -} - -#[test] -fn chain_err() { - use std::fmt; - - error_chain! { - foreign_links { - Fmt(fmt::Error); - } - errors { - Test - } - } - - let _: Result<()> = Err(fmt::Error).chain_err(|| ""); - let _: Result<()> = Err(Error::from_kind(ErrorKind::Test)).chain_err(|| ""); -} - -#[test] -fn links() { - mod test { - error_chain! {} - } - - error_chain! 
{ - links { - Test(test::Error, test::ErrorKind); - } - } -} - -#[cfg(test)] -mod foreign_link_test { - - use std::fmt; - - // Note: foreign errors must be `pub` because they appear in the - // signature of the public foreign_link_error_path - #[derive(Debug)] - pub struct ForeignError { - cause: ForeignErrorCause - } - - impl ::std::error::Error for ForeignError { - fn description(&self) -> &'static str { - "Foreign error description" - } - - fn cause(&self) -> Option<&::std::error::Error> { Some(&self.cause) } - } - - impl fmt::Display for ForeignError { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - write!(formatter, "Foreign error display") - } - } - - #[derive(Debug)] - pub struct ForeignErrorCause {} - - impl ::std::error::Error for ForeignErrorCause { - fn description(&self) -> &'static str { - "Foreign error cause description" - } - - fn cause(&self) -> Option<&::std::error::Error> { None } - } - - impl fmt::Display for ForeignErrorCause { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - write!(formatter, "Foreign error cause display") - } - } - - error_chain! 
{ - types{ - Error, ErrorKind, ResultExt, Result; - } - links {} - foreign_links { - Foreign(ForeignError); - Io(::std::io::Error); - } - errors {} - } - - #[test] - fn display_underlying_error() { - let chained_error = try_foreign_error().err().unwrap(); - assert_eq!( - format!("{}", ForeignError{ cause: ForeignErrorCause{} }), - format!("{}", chained_error) - ); - } - - #[test] - fn finds_cause() { - let chained_error = try_foreign_error().err().unwrap(); - assert_eq!( - format!("{}", ForeignErrorCause{}), - format!("{}", ::std::error::Error::cause(&chained_error).unwrap()) - ); - } - - #[test] - fn iterates() { - let chained_error = try_foreign_error().err().unwrap(); - let mut error_iter = chained_error.iter(); - assert_eq!( - format!("{}", ForeignError{ cause: ForeignErrorCause{} }), - format!("{}", error_iter.next().unwrap()) - ); - assert_eq!( - format!("{}", ForeignErrorCause{}), - format!("{}", error_iter.next().unwrap()) - ); - assert_eq!( - format!("{:?}", None as Option<&::std::error::Error>), - format!("{:?}", error_iter.next()) - ); - } - - fn try_foreign_error() -> Result<()> { - try!(Err(ForeignError{ - cause: ForeignErrorCause{} - })); - Ok(()) - } -} - -#[cfg(test)] -mod attributes_test { - #[allow(unused_imports)] - use std::io; - - #[cfg(not(test))] - mod inner { - error_chain! { - - } - } - - error_chain! { - types { - Error, ErrorKind, ResultExt, Result; - } - - links { - Inner(inner::Error, inner::ErrorKind) #[cfg(not(test))]; - } - - foreign_links { - Io(io::Error) #[cfg(not(test))]; - } - - errors { - #[cfg(not(test))] - AnError { - - } - } - } -} - -#[test] -fn with_result() { - error_chain! { - types { - Error, ErrorKind, ResultExt, Result; - } - } - let _: Result<()> = Ok(()); -} - -#[test] -fn without_result() { - error_chain! { - types { - Error, ErrorKind, ResultExt; - } - } - let _: Result<(), ()> = Ok(()); -} - -#[test] -fn documentation() { - mod inner { - error_chain! {} - } - - error_chain! 
{ - links { - Inner(inner::Error, inner::ErrorKind) #[doc = "Doc"]; - } - foreign_links { - Io(::std::io::Error) #[doc = "Doc"]; - } - errors { - /// Doc - Variant - } - } -} - -#[cfg(test)] -mod multiple_error_same_mod { - error_chain! { - types { - MyError, MyErrorKind, MyResultExt, MyResult; - } - } - error_chain! {} -} - -#[doc(test)] -#[deny(dead_code)] -mod allow_dead_code { - error_chain! {} -} - -// Make sure links actually work! -#[test] -fn rustup_regression() { - error_chain! { - links { - Download(error_chain::mock::Error, error_chain::mock::ErrorKind); - } - - foreign_links { } - - errors { - LocatingWorkingDir { - description("could not locate working directory") - } - } - } -} - -#[test] -fn error_patterns() { - error_chain! { - links { } - - foreign_links { } - - errors { } - } - - // Tuples look nice when matching errors - match Error::from("Test") { - Error(ErrorKind::Msg(_), _) => { - } - } -} - -#[test] -fn error_first() { - error_chain! { - errors { - LocatingWorkingDir { - description("could not locate working directory") - } - } - - links { - Download(error_chain::mock::Error, error_chain::mock::ErrorKind); - } - - foreign_links { } - } -} - -#[test] -fn bail() { - error_chain! { - errors { Foo } - } - - fn foo() -> Result<()> { - bail!(ErrorKind::Foo) - } - - fn bar() -> Result<()> { - bail!("bar") - } - - fn baz() -> Result<()> { - bail!("{}", "baz") - } -} - -/// Since the `types` declaration is a list of symbols, check if we -/// don't change their meaning or order. -#[test] -fn types_declarations() { - error_chain! 
{ - types { - MyError, MyErrorKind, MyResultExt, MyResult; - } - } - - MyError::from_kind(MyErrorKind::Msg("".into())); - - let err: Result<(), ::std::io::Error> = Ok(()); - MyResultExt::chain_err(err, || "").unwrap(); - - let _: MyResult<()> = Ok(()); -} diff -Nru cargo-0.17.0/vendor/error-chain-0.7.2/.travis.yml cargo-0.19.0/vendor/error-chain-0.7.2/.travis.yml --- cargo-0.17.0/vendor/error-chain-0.7.2/.travis.yml 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/error-chain-0.7.2/.travis.yml 1970-01-01 00:00:00.000000000 +0000 @@ -1,45 +0,0 @@ -language: rust -rust: -- stable -- beta -- nightly -# Oldest supported version for all features. -# Use of https://github.com/rust-lang/rfcs/pull/16 -- 1.13.0 -# Oldest supported version as dependency, with no features, tests, or examples. -- 1.10.0 - -sudo: false -cache: cargo -addons: - apt: - packages: - - libcurl4-openssl-dev - - libelf-dev - - libdw-dev - -before_script: -- | - pip install 'travis-cargo<0.2' --user && - export PATH=$HOME/.local/bin:$PATH - -script: -- travis-cargo build -- $FEATURES -- travis-cargo --skip 1.10.0 test -- $FEATURES - -after_success: -- travis-cargo --only stable doc -- travis-cargo --only stable doc-upload - -env: - global: - - secure: ncxJbvJM1vCZfcEftjsFKJMxxhKLgWKaR8Go9AMo0VB5fB2XVW/6NYO5bQEEYpOf1Nc/+2FbI2+Dkz0S/mJpUcNSfBgablCHgwU2sHse7KsoaqfHj2mf1E3exjzSHoP96hPGicC5zAjSXFjCgJPOUSGqqRaJ7z5AsJLhJT6LuK7QpvwPBZzklUN8T+n1sVmws8TNmRIbaniq/q6wYHANHcy6Dl59dx4sKwniUGiZdUhCiddVpoxbECSxc0A8mN2pk7/aW+WGxK3goBs5ZF7+JXF318F62pDcXQmR5CX6WdpenIcJ25g1Vg1WhQ4Ifpe17CN0bfxV8ShuzrQUThCDMffZCo9XySBtODdEowwK1UIpjnFLfIxjOs45Cd8o3tM2j0CfvtnjOz6BCdUU0qiwNPPNx0wFkx3ZiOfSh+FhBhvyPM12HN2tdN0esgVBItFmEci+sSIIXqjVL6DNiu5zTjbu0bs6COwlUWdmL6vmsZtq5tl7Cno9+C3szxRVAkShGydd04l9NYjqNEzTa1EPG50OsnVRKGdRiFzSxhc3BWExNKvcQ4v867t6/PpPkW6s4oXmYI3+De+8O7ExWc6a4alcrDXKlMs5fCb5Pcd4Ju9kowcjkoJo5yf2wW3Ox5R8SJpaEEpvyhx5O/qtIxjhHNzeo8Wsr/6gdNDv20r91TI= - - TRAVIS_CARGO_NIGHTLY_FEATURE="" - matrix: - - 
FEATURES=--features=backtrace - - FEATURES=--no-default-features - -matrix: - exclude: - - env: FEATURES=--features=backtrace - rust: 1.10.0 diff -Nru cargo-0.17.0/vendor/flate2-0.2.14/appveyor.yml cargo-0.19.0/vendor/flate2-0.2.14/appveyor.yml --- cargo-0.17.0/vendor/flate2-0.2.14/appveyor.yml 2017-03-24 16:59:54.000000000 +0000 +++ cargo-0.19.0/vendor/flate2-0.2.14/appveyor.yml 1970-01-01 00:00:00.000000000 +0000 @@ -1,17 +0,0 @@ -environment: - matrix: - - TARGET: x86_64-pc-windows-msvc - - TARGET: i686-pc-windows-msvc - - TARGET: i686-pc-windows-gnu -install: - - ps: Start-FileDownload "https://static.rust-lang.org/dist/rust-nightly-${env:TARGET}.exe" - - rust-nightly-%TARGET%.exe /VERYSILENT /NORESTART /DIR="C:\Program Files (x86)\Rust" - - SET PATH=%PATH%;C:\Program Files (x86)\Rust\bin - - SET PATH=%PATH%;C:\MinGW\bin - - rustc -V - - cargo -V - -build: false - -test_script: - - cargo test --verbose diff -Nru cargo-0.17.0/vendor/flate2-0.2.14/.cargo-checksum.json cargo-0.19.0/vendor/flate2-0.2.14/.cargo-checksum.json --- cargo-0.17.0/vendor/flate2-0.2.14/.cargo-checksum.json 2017-03-24 16:59:54.000000000 +0000 +++ cargo-0.19.0/vendor/flate2-0.2.14/.cargo-checksum.json 1970-01-01 00:00:00.000000000 +0000 @@ -1 +0,0 @@ 
-{"files":{".cargo-ok":"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",".gitignore":"f9b1ca6ae27d1c18215265024629a8960c31379f206d9ed20f64e0b2dcf79805",".travis.yml":"40fd0eae0995ad5e6ecf80bc2a5d3930d163893729014be6a8fd685f498c6938","Cargo.toml":"611ca5c56a9f251b1e32d4665a7b0597ac842b8fe1d45f5b715ab38a0bb585cf","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"378f5840b258e2779c39418f3f2d7b2ba96f1c7917dd6be0713f88305dbda397","README.md":"5338f9d2f7f4c98aa76e7e857441e763749e4a3846cceb3b6ed6c3410dcb0b63","appveyor.yml":"da991211b72fa6f231af7adb84c9fb72f5a9131d1c0a3d47b8ceffe5a82c8542","src/bufreader.rs":"0a1213858056c8154066f45df7b261c052c6a2c55ec88bc21f56ad3f2748d8c4","src/crc.rs":"99a8dcdf1daf5ec0e2d27479136f94a5d1fddd52c4b4879dcc58060074b9422f","src/deflate.rs":"076cdf629d31d83a76d67360deff293931759bb31d96ced6840d67fabe4315bf","src/ffi.rs":"4c5393c45b867e215e8eed02d7a84b677d8d48dfb04e26938ea0c8832bcecb53","src/gz.rs":"961966aea587778cec8470592bdf0856966c960bce4773fea4e0c4d93d819e8e","src/lib.rs":"b7080f67eca913bedfcb780575653dcc076e7592dfdaf4d086153d563dd1834a","src/mem.rs":"8746f28f637d8e3e1b08164b94ddf23c01fba008815fc9dd74b238d88fcadaf9","src/zio.rs":"45c607caac99663a01f5b429d2706a57cdc66cc3e3657c0b75e15ca9b4849ac2","src/zlib.rs":"97df6d8c141cb4b0389457ecb3c3c37b6ad5d28435fe49293c9a5e9552b84e42","tests/corrupt-file.gz":"083dd284aa1621916a2d0f66ea048c8d3ba7a722b22d0d618722633f51e7d39c","tests/good-file.gz":"87296963e53024a74752179ce7e54087565d358a85d3e65c3b37ef36eaa3d4a6","tests/good-file.txt":"bc4e03658a441fe2ad2df7cd2197144b87e41696f01e327b380e869cd9b485a0","tests/gunzip.rs":"86de051daafeb306e3958f27a1ae2385b56ec87516951701ad4627d52408fe27"},"package":"3eeb481e957304178d2e782f2da1257f1434dfecbae883bafb61ada2a9fea3bb"} \ No newline at end of file diff -Nru cargo-0.17.0/vendor/flate2-0.2.14/Cargo.toml cargo-0.19.0/vendor/flate2-0.2.14/Cargo.toml --- cargo-0.17.0/vendor/flate2-0.2.14/Cargo.toml 
2017-03-24 16:59:54.000000000 +0000 +++ cargo-0.19.0/vendor/flate2-0.2.14/Cargo.toml 1970-01-01 00:00:00.000000000 +0000 @@ -1,28 +0,0 @@ -[package] - -name = "flate2" -authors = ["Alex Crichton "] -version = "0.2.14" -license = "MIT/Apache-2.0" -readme = "README.md" -keywords = ["gzip", "flate", "zlib", "encoding"] -repository = "https://github.com/alexcrichton/flate2-rs" -homepage = "https://github.com/alexcrichton/flate2-rs" -documentation = "http://alexcrichton.com/flate2-rs" -description = """ -Bindings to miniz.c for DEFLATE compression and decompression exposed as -Reader/Writer streams. Contains bindings for zlib, deflate, and gzip-based -streams. -""" - -[dependencies] -libc = "0.2" -miniz-sys = { path = "miniz-sys", version = "0.1.7", optional = true } -libz-sys = { version = "1.0", optional = true } - -[dev-dependencies] -rand = "0.3" - -[features] -default = ["miniz-sys"] -zlib = ["libz-sys"] diff -Nru cargo-0.17.0/vendor/flate2-0.2.14/.gitignore cargo-0.19.0/vendor/flate2-0.2.14/.gitignore --- cargo-0.17.0/vendor/flate2-0.2.14/.gitignore 2017-03-24 16:59:54.000000000 +0000 +++ cargo-0.19.0/vendor/flate2-0.2.14/.gitignore 1970-01-01 00:00:00.000000000 +0000 @@ -1,2 +0,0 @@ -target -Cargo.lock diff -Nru cargo-0.17.0/vendor/flate2-0.2.14/LICENSE-APACHE cargo-0.19.0/vendor/flate2-0.2.14/LICENSE-APACHE --- cargo-0.17.0/vendor/flate2-0.2.14/LICENSE-APACHE 2017-03-24 16:59:54.000000000 +0000 +++ cargo-0.19.0/vendor/flate2-0.2.14/LICENSE-APACHE 1970-01-01 00:00:00.000000000 +0000 @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - -1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. 
- - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. 
- - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - -2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - -3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - -4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the 
following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - -5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - -6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - -7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - -8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - -9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- -END OF TERMS AND CONDITIONS - -APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - -Copyright [yyyy] [name of copyright owner] - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
diff -Nru cargo-0.17.0/vendor/flate2-0.2.14/LICENSE-MIT cargo-0.19.0/vendor/flate2-0.2.14/LICENSE-MIT --- cargo-0.17.0/vendor/flate2-0.2.14/LICENSE-MIT 2017-03-24 16:59:54.000000000 +0000 +++ cargo-0.19.0/vendor/flate2-0.2.14/LICENSE-MIT 1970-01-01 00:00:00.000000000 +0000 @@ -1,25 +0,0 @@ -Copyright (c) 2014 Alex Crichton - -Permission is hereby granted, free of charge, to any -person obtaining a copy of this software and associated -documentation files (the "Software"), to deal in the -Software without restriction, including without -limitation the rights to use, copy, modify, merge, -publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software -is furnished to do so, subject to the following -conditions: - -The above copyright notice and this permission notice -shall be included in all copies or substantial portions -of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -DEALINGS IN THE SOFTWARE. 
diff -Nru cargo-0.17.0/vendor/flate2-0.2.14/README.md cargo-0.19.0/vendor/flate2-0.2.14/README.md --- cargo-0.17.0/vendor/flate2-0.2.14/README.md 2017-03-24 16:59:54.000000000 +0000 +++ cargo-0.19.0/vendor/flate2-0.2.14/README.md 1970-01-01 00:00:00.000000000 +0000 @@ -1,70 +0,0 @@ -# flate2 - -[![Build Status](https://travis-ci.org/alexcrichton/flate2-rs.svg?branch=master)](https://travis-ci.org/alexcrichton/flate2-rs) -[![Build status](https://ci.appveyor.com/api/projects/status/9tatexq47i3ee13k?svg=true)](https://ci.appveyor.com/project/alexcrichton/flate2-rs) - -[Documentation](http://alexcrichton.com/flate2-rs/flate2/index.html) - -A streaming compression/decompression library for Rust. The underlying -implementation by default uses [`miniz`](https://code.google.com/p/miniz/) but -can optionally be get configured to use the system zlib, if available. - -Supported formats: - -* deflate -* zlib -* gzip - -```toml -# Cargo.toml -[dependencies] -flate2 = "0.2" -``` - -Using zlib instead of miniz: - -```toml -[dependencies] -flate2 = { version = "0.2", features = ["zlib"], default-features = false } -``` - -## Compression - -```rust -extern crate flate2; - -use std::io::prelude::*; -use flate2::Compression; -use flate2::write::ZlibEncoder; - -fn main() { - let mut e = ZlibEncoder::new(Vec::new(), Compression::Default); - e.write(b"foo"); - e.write(b"bar"); - let compressed_bytes = e.finish(); -} -``` - -## Decompression - -```rust,no_run -extern crate flate2; - -use std::io::prelude::*; -use flate2::read::GzDecoder; - -fn main() { - let mut d = GzDecoder::new("...".as_bytes()).unwrap(); - let mut s = String::new(); - d.read_to_string(&mut s).unwrap(); - println!("{}", s); -} -``` - -# License - -`flate2-rs` is primarily distributed under the terms of both the MIT license and -the Apache License (Version 2.0), with portions covered by various BSD-like -licenses. - -See LICENSE-APACHE, and LICENSE-MIT for details. 
diff -Nru cargo-0.17.0/vendor/flate2-0.2.14/src/bufreader.rs cargo-0.19.0/vendor/flate2-0.2.14/src/bufreader.rs --- cargo-0.17.0/vendor/flate2-0.2.14/src/bufreader.rs 2017-03-24 16:59:54.000000000 +0000 +++ cargo-0.19.0/vendor/flate2-0.2.14/src/bufreader.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,87 +0,0 @@ -// Copyright 2013 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -use std::cmp; -use std::io; -use std::io::prelude::*; -use std::mem; - -pub struct BufReader { - inner: R, - buf: Box<[u8]>, - pos: usize, - cap: usize, -} - -impl BufReader { - pub fn new(inner: R) -> BufReader { - BufReader::with_buf(vec![0; 32 * 1024], inner) - } - - pub fn with_buf(buf: Vec, inner: R) -> BufReader { - BufReader { - inner: inner, - buf: buf.into_boxed_slice(), - pos: 0, - cap: 0, - } - } - - pub fn get_ref(&self) -> &R { - &self.inner - } - - pub fn get_mut(&mut self) -> &mut R { - &mut self.inner - } - - pub fn into_inner(self) -> R { - self.inner - } - - pub fn reset(&mut self, inner: R) -> R { - self.pos = 0; - self.cap = 0; - mem::replace(&mut self.inner, inner) - } -} - -impl Read for BufReader { - fn read(&mut self, buf: &mut [u8]) -> io::Result { - // If we don't have any buffered data and we're doing a massive read - // (larger than our internal buffer), bypass our internal buffer - // entirely. 
- if self.pos == self.cap && buf.len() >= self.buf.len() { - return self.inner.read(buf); - } - let nread = { - let mut rem = try!(self.fill_buf()); - try!(rem.read(buf)) - }; - self.consume(nread); - Ok(nread) - } -} - -impl BufRead for BufReader { - fn fill_buf(&mut self) -> io::Result<&[u8]> { - // If we've reached the end of our internal buffer then we need to fetch - // some more data from the underlying reader. - if self.pos == self.cap { - self.cap = try!(self.inner.read(&mut self.buf)); - self.pos = 0; - } - Ok(&self.buf[self.pos..self.cap]) - } - - fn consume(&mut self, amt: usize) { - self.pos = cmp::min(self.pos + amt, self.cap); - } -} diff -Nru cargo-0.17.0/vendor/flate2-0.2.14/src/crc.rs cargo-0.19.0/vendor/flate2-0.2.14/src/crc.rs --- cargo-0.17.0/vendor/flate2-0.2.14/src/crc.rs 2017-03-24 16:59:54.000000000 +0000 +++ cargo-0.19.0/vendor/flate2-0.2.14/src/crc.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,79 +0,0 @@ -//! Simple CRC bindings backed by miniz.c - -use std::io::prelude::*; -use std::io; -use libc; - -use ffi; - -pub struct Crc { - crc: libc::c_ulong, - amt: u32, -} - -pub struct CrcReader { - inner: R, - crc: Crc, -} - -impl Crc { - pub fn new() -> Crc { - Crc { crc: 0, amt: 0 } - } - - pub fn sum(&self) -> u32 { - self.crc as u32 - } - - pub fn amt_as_u32(&self) -> u32 { - self.amt - } - - pub fn update(&mut self, data: &[u8]) { - self.amt = self.amt.wrapping_add(data.len() as u32); - self.crc = unsafe { - ffi::mz_crc32(self.crc, data.as_ptr(), data.len() as libc::size_t) - }; - } -} - -impl CrcReader { - pub fn new(r: R) -> CrcReader { - CrcReader { - inner: r, - crc: Crc::new(), - } - } - - pub fn crc(&self) -> &Crc { - &self.crc - } - - pub fn into_inner(self) -> R { - self.inner - } - - pub fn inner(&mut self) -> &mut R { - &mut self.inner - } -} - -impl Read for CrcReader { - fn read(&mut self, into: &mut [u8]) -> io::Result { - let amt = try!(self.inner.read(into)); - self.crc.update(&into[..amt]); - Ok(amt) - } -} - -impl BufRead 
for CrcReader { - fn fill_buf(&mut self) -> io::Result<&[u8]> { - self.inner.fill_buf() - } - fn consume(&mut self, amt: usize) { - if let Ok(data) = self.inner.fill_buf() { - self.crc.update(&data[..amt]); - } - self.inner.consume(amt); - } -} diff -Nru cargo-0.17.0/vendor/flate2-0.2.14/src/deflate.rs cargo-0.19.0/vendor/flate2-0.2.14/src/deflate.rs --- cargo-0.17.0/vendor/flate2-0.2.14/src/deflate.rs 2017-03-24 16:59:54.000000000 +0000 +++ cargo-0.19.0/vendor/flate2-0.2.14/src/deflate.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,561 +0,0 @@ -//! DEFLATE compression and decompression of streams - -use std::io::prelude::*; -use std::io; -use std::mem; - -use bufreader::BufReader; -use zio; -use {Compress, Decompress}; - -/// A DEFLATE encoder, or compressor. -/// -/// This structure implements a `Write` interface and takes a stream of -/// uncompressed data, writing the compressed data to the wrapped writer. -pub struct EncoderWriter { - inner: zio::Writer, -} - -/// A DEFLATE encoder, or compressor. -/// -/// This structure implements a `Read` interface and will read uncompressed -/// data from an underlying stream and emit a stream of compressed data. -pub struct EncoderReader { - inner: EncoderReaderBuf>, -} - -/// A DEFLATE encoder, or compressor. -/// -/// This structure implements a `BufRead` interface and will read uncompressed -/// data from an underlying stream and emit a stream of compressed data. -pub struct EncoderReaderBuf { - obj: R, - data: Compress, -} - -/// A DEFLATE decoder, or decompressor. -/// -/// This structure implements a `Read` interface and takes a stream of -/// compressed data as input, providing the decompressed data when read from. -pub struct DecoderReader { - inner: DecoderReaderBuf>, -} - -/// A DEFLATE decoder, or decompressor. -/// -/// This structure implements a `BufRead` interface and takes a stream of -/// compressed data as input, providing the decompressed data when read from. 
-pub struct DecoderReaderBuf { - obj: R, - data: Decompress, -} - -/// A DEFLATE decoder, or decompressor. -/// -/// This structure implements a `Write` and will emit a stream of decompressed -/// data when fed a stream of compressed data. -pub struct DecoderWriter { - inner: zio::Writer, -} - -impl EncoderWriter { - /// Creates a new encoder which will write compressed data to the stream - /// given at the given compression level. - /// - /// When this encoder is dropped or unwrapped the final pieces of data will - /// be flushed. - pub fn new(w: W, level: ::Compression) -> EncoderWriter { - EncoderWriter { - inner: zio::Writer::new(w, Compress::new(level, false)), - } - } - - /// Resets the state of this encoder entirely, swapping out the output - /// stream for another. - /// - /// This function will finish encoding the current stream into the current - /// output stream before swapping out the two output streams. If the stream - /// cannot be finished an error is returned. - /// - /// After the current stream has been finished, this will reset the internal - /// state of this encoder and replace the output stream with the one - /// provided, returning the previous output stream. Future data written to - /// this encoder will be the compressed into the stream `w` provided. - pub fn reset(&mut self, w: W) -> io::Result { - try!(self.inner.finish()); - self.inner.data.reset(); - Ok(self.inner.replace(w)) - } - - /// Consumes this encoder, flushing the output stream. - /// - /// This will flush the underlying data stream and then return the contained - /// writer if the flush succeeded. 
- pub fn finish(mut self) -> io::Result { - try!(self.inner.finish()); - Ok(self.inner.into_inner()) - } -} - -impl Write for EncoderWriter { - fn write(&mut self, buf: &[u8]) -> io::Result { - self.inner.write(buf) - } - - fn flush(&mut self) -> io::Result<()> { - self.inner.flush() - } -} - -impl EncoderReader { - /// Creates a new encoder which will read uncompressed data from the given - /// stream and emit the compressed stream. - pub fn new(r: R, level: ::Compression) -> EncoderReader { - EncoderReader { - inner: EncoderReaderBuf::new(BufReader::new(r), level), - } - } - - /// Resets the state of this encoder entirely, swapping out the input - /// stream for another. - /// - /// This function will reset the internal state of this encoder and replace - /// the input stream with the one provided, returning the previous input - /// stream. Future data read from this encoder will be the compressed - /// version of `r`'s data. - pub fn reset(&mut self, r: R) -> R { - self.inner.data.reset(); - self.inner.obj.reset(r) - } - - /// Acquires a reference to the underlying reader - pub fn get_ref(&self) -> &R { - self.inner.get_ref().get_ref() - } - - /// Acquires a mutable reference to the underlying stream - /// - /// Note that mutation of the stream may result in surprising results if - /// this encoder is continued to be used. - pub fn get_mut(&mut self) -> &mut R { - self.inner.get_mut().get_mut() - } - - /// Consumes this encoder, returning the underlying reader. - pub fn into_inner(self) -> R { - self.inner.into_inner().into_inner() - } -} - -impl Read for EncoderReader { - fn read(&mut self, buf: &mut [u8]) -> io::Result { - self.inner.read(buf) - } -} - -impl EncoderReaderBuf { - /// Creates a new encoder which will read uncompressed data from the given - /// stream and emit the compressed stream. 
- pub fn new(r: R, level: ::Compression) -> EncoderReaderBuf { - EncoderReaderBuf { - obj: r, - data: Compress::new(level, false), - } - } - - /// Resets the state of this encoder entirely, swapping out the input - /// stream for another. - /// - /// This function will reset the internal state of this encoder and replace - /// the input stream with the one provided, returning the previous input - /// stream. Future data read from this encoder will be the compressed - /// version of `r`'s data. - pub fn reset(&mut self, r: R) -> R { - self.data.reset(); - mem::replace(&mut self.obj, r) - } - - /// Acquires a reference to the underlying reader - pub fn get_ref(&self) -> &R { - &self.obj - } - - /// Acquires a mutable reference to the underlying stream - /// - /// Note that mutation of the stream may result in surprising results if - /// this encoder is continued to be used. - pub fn get_mut(&mut self) -> &mut R { - &mut self.obj - } - - /// Consumes this encoder, returning the underlying reader. - pub fn into_inner(self) -> R { - self.obj - } -} - -impl Read for EncoderReaderBuf { - fn read(&mut self, buf: &mut [u8]) -> io::Result { - zio::read(&mut self.obj, &mut self.data, buf) - } -} - -impl DecoderReader { - /// Creates a new decoder which will decompress data read from the given - /// stream. - pub fn new(r: R) -> DecoderReader { - DecoderReader::new_with_buf(r, vec![0; 32 * 1024]) - } - - /// Same as `new`, but the intermediate buffer for data is specified. - /// - /// Note that the capacity of the intermediate buffer is never increased, - /// and it is recommended for it to be large. - pub fn new_with_buf(r: R, buf: Vec) -> DecoderReader { - DecoderReader { - inner: DecoderReaderBuf::new(BufReader::with_buf(buf, r)) - } - } - - /// Resets the state of this decoder entirely, swapping out the input - /// stream for another. 
- /// - /// This will reset the internal state of this decoder and replace the - /// input stream with the one provided, returning the previous input - /// stream. Future data read from this decoder will be the decompressed - /// version of `r`'s data. - pub fn reset(&mut self, r: R) -> R { - self.inner.data = Decompress::new(false); - self.inner.obj.reset(r) - } - - /// Acquires a reference to the underlying stream - pub fn get_ref(&self) -> &R { - self.inner.get_ref().get_ref() - } - - /// Acquires a mutable reference to the underlying stream - /// - /// Note that mutation of the stream may result in surprising results if - /// this encoder is continued to be used. - pub fn get_mut(&mut self) -> &mut R { - self.inner.get_mut().get_mut() - } - - /// Consumes this decoder, returning the underlying reader. - pub fn into_inner(self) -> R { - self.inner.into_inner().into_inner() - } - - /// Returns the number of bytes that the decompressor has consumed. - /// - /// Note that this will likely be smaller than what the decompressor - /// actually read from the underlying stream due to buffering. - pub fn total_in(&self) -> u64 { - self.inner.total_in() - } - - /// Returns the number of bytes that the decompressor has produced. - pub fn total_out(&self) -> u64 { - self.inner.total_out() - } -} - -impl Read for DecoderReader { - fn read(&mut self, into: &mut [u8]) -> io::Result { - self.inner.read(into) - } -} - -impl DecoderReaderBuf { - /// Creates a new decoder which will decompress data read from the given - /// stream. - pub fn new(r: R) -> DecoderReaderBuf { - DecoderReaderBuf { - obj: r, - data: Decompress::new(false), - } - } - - /// Resets the state of this decoder entirely, swapping out the input - /// stream for another. - /// - /// This will reset the internal state of this decoder and replace the - /// input stream with the one provided, returning the previous input - /// stream. 
Future data read from this decoder will be the decompressed - /// version of `r`'s data. - pub fn reset(&mut self, r: R) -> R { - self.data = Decompress::new(false); - mem::replace(&mut self.obj, r) - } - - /// Acquires a reference to the underlying stream - pub fn get_ref(&self) -> &R { - &self.obj - } - - /// Acquires a mutable reference to the underlying stream - /// - /// Note that mutation of the stream may result in surprising results if - /// this encoder is continued to be used. - pub fn get_mut(&mut self) -> &mut R { - &mut self.obj - } - - /// Consumes this decoder, returning the underlying reader. - pub fn into_inner(self) -> R { - self.obj - } - - /// Returns the number of bytes that the decompressor has consumed. - /// - /// Note that this will likely be smaller than what the decompressor - /// actually read from the underlying stream due to buffering. - pub fn total_in(&self) -> u64 { - self.data.total_in() - } - - /// Returns the number of bytes that the decompressor has produced. - pub fn total_out(&self) -> u64 { - self.data.total_out() - } -} - -impl Read for DecoderReaderBuf { - fn read(&mut self, into: &mut [u8]) -> io::Result { - zio::read(&mut self.obj, &mut self.data, into) - } -} - -impl DecoderWriter { - /// Creates a new decoder which will write uncompressed data to the stream. - /// - /// When this encoder is dropped or unwrapped the final pieces of data will - /// be flushed. - pub fn new(w: W) -> DecoderWriter { - DecoderWriter { - inner: zio::Writer::new(w, Decompress::new(false)), - } - } - - /// Resets the state of this decoder entirely, swapping out the output - /// stream for another. - /// - /// This function will finish encoding the current stream into the current - /// output stream before swapping out the two output streams. If the stream - /// cannot be finished an error is returned. 
- /// - /// This will then reset the internal state of this decoder and replace the - /// output stream with the one provided, returning the previous output - /// stream. Future data written to this decoder will be decompressed into - /// the output stream `w`. - pub fn reset(&mut self, w: W) -> io::Result { - try!(self.inner.finish()); - self.inner.data = Decompress::new(false); - Ok(self.inner.replace(w)) - } - - /// Consumes this encoder, flushing the output stream. - /// - /// This will flush the underlying data stream and then return the contained - /// writer if the flush succeeded. - pub fn finish(mut self) -> io::Result { - try!(self.inner.finish()); - Ok(self.inner.into_inner()) - } - - /// Returns the number of bytes that the decompressor has consumed for - /// decompression. - /// - /// Note that this will likely be smaller than the number of bytes - /// successfully written to this stream due to internal buffering. - pub fn total_in(&self) -> u64 { - self.inner.data.total_in() - } - - /// Returns the number of bytes that the decompressor has written to its - /// output stream. 
- pub fn total_out(&self) -> u64 { - self.inner.data.total_out() - } -} - -impl Write for DecoderWriter { - fn write(&mut self, buf: &[u8]) -> io::Result { - self.inner.write(buf) - } - - fn flush(&mut self) -> io::Result<()> { - self.inner.flush() - } -} - -#[cfg(test)] -mod tests { - use std::io::prelude::*; - - use rand::{thread_rng, Rng}; - - use deflate::{EncoderWriter, EncoderReader, DecoderReader, DecoderWriter}; - use Compression::Default; - - #[test] - fn roundtrip() { - let mut real = Vec::new(); - let mut w = EncoderWriter::new(Vec::new(), Default); - let v = thread_rng().gen_iter::().take(1024).collect::>(); - for _ in 0..200 { - let to_write = &v[..thread_rng().gen_range(0, v.len())]; - real.extend(to_write.iter().map(|x| *x)); - w.write_all(to_write).unwrap(); - } - let result = w.finish().unwrap(); - let mut r = DecoderReader::new(&result[..]); - let mut ret = Vec::new(); - r.read_to_end(&mut ret).unwrap(); - assert!(ret == real); - } - - #[test] - fn drop_writes() { - let mut data = Vec::new(); - EncoderWriter::new(&mut data, Default).write_all(b"foo").unwrap(); - let mut r = DecoderReader::new(&data[..]); - let mut ret = Vec::new(); - r.read_to_end(&mut ret).unwrap(); - assert!(ret == b"foo"); - } - - #[test] - fn total_in() { - let mut real = Vec::new(); - let mut w = EncoderWriter::new(Vec::new(), Default); - let v = thread_rng().gen_iter::().take(1024).collect::>(); - for _ in 0..200 { - let to_write = &v[..thread_rng().gen_range(0, v.len())]; - real.extend(to_write.iter().map(|x| *x)); - w.write_all(to_write).unwrap(); - } - let mut result = w.finish().unwrap(); - - let result_len = result.len(); - - for _ in 0..200 { - result.extend(v.iter().map(|x| *x)); - } - - let mut r = DecoderReader::new(&result[..]); - let mut ret = Vec::new(); - r.read_to_end(&mut ret).unwrap(); - assert!(ret == real); - assert_eq!(r.total_in(), result_len as u64); - } - - #[test] - fn roundtrip2() { - let v = thread_rng() - .gen_iter::() - .take(1024 * 1024) - 
.collect::>(); - let mut r = DecoderReader::new(EncoderReader::new(&v[..], Default)); - let mut ret = Vec::new(); - r.read_to_end(&mut ret).unwrap(); - assert_eq!(ret, v); - } - - #[test] - fn roundtrip3() { - let v = thread_rng() - .gen_iter::() - .take(1024 * 1024) - .collect::>(); - let mut w = EncoderWriter::new(DecoderWriter::new(Vec::new()), Default); - w.write_all(&v).unwrap(); - let w = w.finish().unwrap().finish().unwrap(); - assert!(w == v); - } - - #[test] - fn reset_writer() { - let v = thread_rng() - .gen_iter::() - .take(1024 * 1024) - .collect::>(); - let mut w = EncoderWriter::new(Vec::new(), Default); - w.write_all(&v).unwrap(); - let a = w.reset(Vec::new()).unwrap(); - w.write_all(&v).unwrap(); - let b = w.finish().unwrap(); - - let mut w = EncoderWriter::new(Vec::new(), Default); - w.write_all(&v).unwrap(); - let c = w.finish().unwrap(); - assert!(a == b && b == c); - } - - #[test] - fn reset_reader() { - let v = thread_rng() - .gen_iter::() - .take(1024 * 1024) - .collect::>(); - let (mut a, mut b, mut c) = (Vec::new(), Vec::new(), Vec::new()); - let mut r = EncoderReader::new(&v[..], Default); - r.read_to_end(&mut a).unwrap(); - r.reset(&v[..]); - r.read_to_end(&mut b).unwrap(); - - let mut r = EncoderReader::new(&v[..], Default); - r.read_to_end(&mut c).unwrap(); - assert!(a == b && b == c); - } - - #[test] - fn reset_decoder() { - let v = thread_rng() - .gen_iter::() - .take(1024 * 1024) - .collect::>(); - let mut w = EncoderWriter::new(Vec::new(), Default); - w.write_all(&v).unwrap(); - let data = w.finish().unwrap(); - - { - let (mut a, mut b, mut c) = (Vec::new(), Vec::new(), Vec::new()); - let mut r = DecoderReader::new(&data[..]); - r.read_to_end(&mut a).unwrap(); - r.reset(&data); - r.read_to_end(&mut b).unwrap(); - - let mut r = DecoderReader::new(&data[..]); - r.read_to_end(&mut c).unwrap(); - assert!(a == b && b == c && c == v); - } - - { - let mut w = DecoderWriter::new(Vec::new()); - w.write_all(&data).unwrap(); - let a = 
w.reset(Vec::new()).unwrap(); - w.write_all(&data).unwrap(); - let b = w.finish().unwrap(); - - let mut w = DecoderWriter::new(Vec::new()); - w.write_all(&data).unwrap(); - let c = w.finish().unwrap(); - assert!(a == b && b == c && c == v); - } - } - - #[test] - fn zero_length_read_with_data() { - let m = vec![3u8; 128 * 1024 + 1]; - let mut c = EncoderReader::new(&m[..], ::Compression::Default); - - let mut result = Vec::new(); - c.read_to_end(&mut result).unwrap(); - - let mut d = DecoderReader::new(&result[..]); - let mut data = Vec::new(); - assert!(d.read(&mut data).unwrap() == 0); - } -} diff -Nru cargo-0.17.0/vendor/flate2-0.2.14/src/ffi.rs cargo-0.19.0/vendor/flate2-0.2.14/src/ffi.rs --- cargo-0.17.0/vendor/flate2-0.2.14/src/ffi.rs 2017-03-24 16:59:54.000000000 +0000 +++ cargo-0.19.0/vendor/flate2-0.2.14/src/ffi.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,65 +0,0 @@ -pub use self::imp::*; - -#[cfg(feature = "zlib")] -#[allow(bad_style)] -mod imp { - extern crate libz_sys as z; - use std::mem; - use libc::{c_int, size_t, c_ulong, c_uint, c_char}; - - pub use self::z::deflateEnd as mz_deflateEnd; - pub use self::z::inflateEnd as mz_inflateEnd; - pub use self::z::deflateReset as mz_deflateReset; - pub use self::z::deflate as mz_deflate; - pub use self::z::inflate as mz_inflate; - pub use self::z::z_stream as mz_stream; - - pub use self::z::Z_BLOCK as MZ_BLOCK; - pub use self::z::Z_BUF_ERROR as MZ_BUF_ERROR; - pub use self::z::Z_DATA_ERROR as MZ_DATA_ERROR; - pub use self::z::Z_DEFAULT_STRATEGY as MZ_DEFAULT_STRATEGY; - pub use self::z::Z_DEFLATED as MZ_DEFLATED; - pub use self::z::Z_FINISH as MZ_FINISH; - pub use self::z::Z_FULL_FLUSH as MZ_FULL_FLUSH; - pub use self::z::Z_NO_FLUSH as MZ_NO_FLUSH; - pub use self::z::Z_OK as MZ_OK; - pub use self::z::Z_PARTIAL_FLUSH as MZ_PARTIAL_FLUSH; - pub use self::z::Z_STREAM_END as MZ_STREAM_END; - pub use self::z::Z_SYNC_FLUSH as MZ_SYNC_FLUSH; - - pub const MZ_DEFAULT_WINDOW_BITS: c_int = 15; - - pub unsafe extern fn 
mz_crc32(crc: c_ulong, - ptr: *const u8, - len: size_t) -> c_ulong { - z::crc32(crc, ptr, len as c_uint) - } - - const ZLIB_VERSION: &'static str = "1.2.8\0"; - - pub unsafe extern fn mz_deflateInit2(stream: *mut mz_stream, - level: c_int, - method: c_int, - window_bits: c_int, - mem_level: c_int, - strategy: c_int) -> c_int { - z::deflateInit2_(stream, level, method, window_bits, mem_level, - strategy, - ZLIB_VERSION.as_ptr() as *const c_char, - mem::size_of::() as c_int) - } - pub unsafe extern fn mz_inflateInit2(stream: *mut mz_stream, - window_bits: c_int) - -> c_int { - z::inflateInit2_(stream, window_bits, - ZLIB_VERSION.as_ptr() as *const c_char, - mem::size_of::() as c_int) - } -} - -#[cfg(not(feature = "zlib"))] -mod imp { - extern crate miniz_sys; - - pub use self::miniz_sys::*; -} diff -Nru cargo-0.17.0/vendor/flate2-0.2.14/src/gz.rs cargo-0.19.0/vendor/flate2-0.2.14/src/gz.rs --- cargo-0.17.0/vendor/flate2-0.2.14/src/gz.rs 2017-03-24 16:59:54.000000000 +0000 +++ cargo-0.19.0/vendor/flate2-0.2.14/src/gz.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,654 +0,0 @@ -//! gzip compression/decompression -//! -//! [1]: http://www.gzip.org/zlib/rfc-gzip.html - -use std::cmp; -use std::env; -use std::ffi::CString; -use std::io::prelude::*; -use std::io; - -use {Compression, Compress}; -use bufreader::BufReader; -use crc::{CrcReader, Crc}; -use deflate; -use zio; - -static FHCRC: u8 = 1 << 1; -static FEXTRA: u8 = 1 << 2; -static FNAME: u8 = 1 << 3; -static FCOMMENT: u8 = 1 << 4; - -/// A gzip streaming encoder -/// -/// This structure exposes a `Write` interface that will emit compressed data -/// to the underlying writer `W`. -pub struct EncoderWriter { - inner: zio::Writer, - crc: Crc, - header: Vec, -} - -/// A gzip streaming encoder -/// -/// This structure exposes a `Read` interface that will read uncompressed data -/// from the underlying reader and expose the compressed version as a `Read` -/// interface. 
-pub struct EncoderReader { - inner: EncoderReaderBuf>, -} - -/// A gzip streaming encoder -/// -/// This structure exposes a `Read` interface that will read uncompressed data -/// from the underlying reader and expose the compressed version as a `Read` -/// interface. -pub struct EncoderReaderBuf { - inner: deflate::EncoderReaderBuf>, - header: Vec, - pos: usize, - eof: bool, -} - -/// A builder structure to create a new gzip Encoder. -/// -/// This structure controls header configuration options such as the filename. -pub struct Builder { - extra: Option>, - filename: Option, - comment: Option, - mtime: u32, -} - -/// A gzip streaming decoder -/// -/// This structure exposes a `Read` interface that will consume compressed -/// data from the underlying reader and emit uncompressed data. -pub struct DecoderReader { - inner: DecoderReaderBuf>, -} - -/// A gzip streaming decoder -/// -/// This structure exposes a `Read` interface that will consume compressed -/// data from the underlying reader and emit uncompressed data. -pub struct DecoderReaderBuf { - inner: CrcReader>, - header: Header, - finished: bool, -} - -/// A structure representing the header of a gzip stream. -/// -/// The header can contain metadata about the file that was compressed, if -/// present. -pub struct Header { - extra: Option>, - filename: Option>, - comment: Option>, - mtime: u32, -} - -impl Builder { - /// Create a new blank builder with no header by default. - pub fn new() -> Builder { - Builder { - extra: None, - filename: None, - comment: None, - mtime: 0, - } - } - - /// Configure the `mtime` field in the gzip header. - pub fn mtime(mut self, mtime: u32) -> Builder { - self.mtime = mtime; - self - } - - /// Configure the `extra` field in the gzip header. - pub fn extra(mut self, extra: Vec) -> Builder { - self.extra = Some(extra); - self - } - - /// Configure the `filename` field in the gzip header. 
- pub fn filename(mut self, filename: &[u8]) -> Builder { - self.filename = Some(CString::new(filename).unwrap()); - self - } - - /// Configure the `comment` field in the gzip header. - pub fn comment(mut self, comment: &[u8]) -> Builder { - self.comment = Some(CString::new(comment).unwrap()); - self - } - - /// Consume this builder, creating a writer encoder in the process. - /// - /// The data written to the returned encoder will be compressed and then - /// written out to the supplied parameter `w`. - pub fn write(self, w: W, lvl: Compression) -> EncoderWriter { - EncoderWriter { - inner: zio::Writer::new(w, Compress::new(lvl, false)), - crc: Crc::new(), - header: self.into_header(lvl), - } - } - - /// Consume this builder, creating a reader encoder in the process. - /// - /// Data read from the returned encoder will be the compressed version of - /// the data read from the given reader. - pub fn read(self, r: R, lvl: Compression) -> EncoderReader { - EncoderReader { - inner: self.buf_read(BufReader::new(r), lvl), - } - } - - /// Consume this builder, creating a reader encoder in the process. - /// - /// Data read from the returned encoder will be the compressed version of - /// the data read from the given reader. 
- pub fn buf_read(self, r: R, lvl: Compression) -> EncoderReaderBuf - where R: BufRead - { - let crc = CrcReader::new(r); - EncoderReaderBuf { - inner: deflate::EncoderReaderBuf::new(crc, lvl), - header: self.into_header(lvl), - pos: 0, - eof: false, - } - } - - fn into_header(self, lvl: Compression) -> Vec { - let Builder { extra, filename, comment, mtime } = self; - let mut flg = 0; - let mut header = vec![0u8; 10]; - match extra { - Some(v) => { - flg |= FEXTRA; - header.push((v.len() >> 0) as u8); - header.push((v.len() >> 8) as u8); - header.extend(v); - } - None => {} - } - match filename { - Some(filename) => { - flg |= FNAME; - header.extend(filename.as_bytes_with_nul().iter().map(|x| *x)); - } - None => {} - } - match comment { - Some(comment) => { - flg |= FCOMMENT; - header.extend(comment.as_bytes_with_nul().iter().map(|x| *x)); - } - None => {} - } - header[0] = 0x1f; - header[1] = 0x8b; - header[2] = 8; - header[3] = flg; - header[4] = (mtime >> 0) as u8; - header[5] = (mtime >> 8) as u8; - header[6] = (mtime >> 16) as u8; - header[7] = (mtime >> 24) as u8; - header[8] = match lvl { - Compression::Best => 2, - Compression::Fast => 4, - _ => 0, - }; - header[9] = match env::consts::OS { - "linux" => 3, - "macos" => 7, - "win32" => 0, - _ => 255, - }; - return header; - } -} - -impl EncoderWriter { - /// Creates a new encoder which will use the given compression level. - /// - /// The encoder is not configured specially for the emitted header. For - /// header configuration, see the `Builder` type. - /// - /// The data written to the returned encoder will be compressed and then - /// written to the stream `w`. - pub fn new(w: W, level: Compression) -> EncoderWriter { - Builder::new().write(w, level) - } - - /// Finish encoding this stream, returning the underlying writer once the - /// encoding is done. 
- pub fn finish(mut self) -> io::Result { - try!(self.do_finish()); - Ok(self.inner.take_inner().unwrap()) - } - - fn do_finish(&mut self) -> io::Result<()> { - if self.header.len() != 0 { - try!(self.inner.get_mut().unwrap().write_all(&self.header)); - } - try!(self.inner.finish()); - let mut inner = self.inner.get_mut().unwrap(); - let (sum, amt) = (self.crc.sum() as u32, self.crc.amt_as_u32()); - let buf = [(sum >> 0) as u8, - (sum >> 8) as u8, - (sum >> 16) as u8, - (sum >> 24) as u8, - (amt >> 0) as u8, - (amt >> 8) as u8, - (amt >> 16) as u8, - (amt >> 24) as u8]; - inner.write_all(&buf) - } -} - -impl Write for EncoderWriter { - fn write(&mut self, buf: &[u8]) -> io::Result { - if self.header.len() != 0 { - try!(self.inner.get_mut().unwrap().write_all(&self.header)); - self.header.truncate(0); - } - let n = try!(self.inner.write(buf)); - self.crc.update(&buf[..n]); - Ok(n) - } - - fn flush(&mut self) -> io::Result<()> { - self.inner.flush() - } -} - -impl Drop for EncoderWriter { - fn drop(&mut self) { - if self.inner.get_mut().is_some() { - let _ = self.do_finish(); - } - } -} - -impl EncoderReader { - /// Creates a new encoder which will use the given compression level. - /// - /// The encoder is not configured specially for the emitted header. For - /// header configuration, see the `Builder` type. - /// - /// The data read from the stream `r` will be compressed and available - /// through the returned reader. 
- pub fn new(r: R, level: Compression) -> EncoderReader { - Builder::new().read(r, level) - } - - /// Returns the underlying stream, consuming this encoder - pub fn into_inner(self) -> R { - self.inner.into_inner().into_inner() - } -} - -fn copy(into: &mut [u8], from: &[u8], pos: &mut usize) -> usize { - let min = cmp::min(into.len(), from.len() - *pos); - for (slot, val) in into.iter_mut().zip(from[*pos..*pos + min].iter()) { - *slot = *val; - } - *pos += min; - return min; -} - -impl Read for EncoderReader { - fn read(&mut self, mut into: &mut [u8]) -> io::Result { - self.inner.read(into) - } -} - -impl EncoderReaderBuf { - /// Creates a new encoder which will use the given compression level. - /// - /// The encoder is not configured specially for the emitted header. For - /// header configuration, see the `Builder` type. - /// - /// The data read from the stream `r` will be compressed and available - /// through the returned reader. - pub fn new(r: R, level: Compression) -> EncoderReaderBuf { - Builder::new().buf_read(r, level) - } - - /// Returns the underlying stream, consuming this encoder - pub fn into_inner(self) -> R { - self.inner.into_inner().into_inner() - } - - fn read_footer(&mut self, into: &mut [u8]) -> io::Result { - if self.pos == 8 { - return Ok(0); - } - let crc = self.inner.get_ref().crc(); - let ref arr = [(crc.sum() >> 0) as u8, - (crc.sum() >> 8) as u8, - (crc.sum() >> 16) as u8, - (crc.sum() >> 24) as u8, - (crc.amt_as_u32() >> 0) as u8, - (crc.amt_as_u32() >> 8) as u8, - (crc.amt_as_u32() >> 16) as u8, - (crc.amt_as_u32() >> 24) as u8]; - Ok(copy(into, arr, &mut self.pos)) - } -} - -impl Read for EncoderReaderBuf { - fn read(&mut self, mut into: &mut [u8]) -> io::Result { - let mut amt = 0; - if self.eof { - return self.read_footer(into); - } else if self.pos < self.header.len() { - amt += copy(into, &self.header, &mut self.pos); - if amt == into.len() { - return Ok(amt); - } - let tmp = into; - into = &mut tmp[amt..]; - } - match 
try!(self.inner.read(into)) { - 0 => { - self.eof = true; - self.pos = 0; - self.read_footer(into) - } - n => Ok(amt + n), - } - } -} - -impl DecoderReader { - /// Creates a new decoder from the given reader, immediately parsing the - /// gzip header. - /// - /// If an error is encountered when parsing the gzip header, an error is - /// returned. - pub fn new(r: R) -> io::Result> { - DecoderReaderBuf::new(BufReader::new(r)).map(|r| { - DecoderReader { inner: r } - }) - } - - /// Returns the header associated with this stream. - pub fn header(&self) -> &Header { - self.inner.header() - } -} - -impl Read for DecoderReader { - fn read(&mut self, into: &mut [u8]) -> io::Result { - self.inner.read(into) - } -} - -impl DecoderReaderBuf { - /// Creates a new decoder from the given reader, immediately parsing the - /// gzip header. - /// - /// If an error is encountered when parsing the gzip header, an error is - /// returned. - pub fn new(r: R) -> io::Result> { - let mut crc_reader = CrcReader::new(r); - let mut header = [0; 10]; - try!(crc_reader.read_exact(&mut header)); - - let id1 = header[0]; - let id2 = header[1]; - if id1 != 0x1f || id2 != 0x8b { - return Err(bad_header()); - } - let cm = header[2]; - if cm != 8 { - return Err(bad_header()); - } - - let flg = header[3]; - let mtime = ((header[4] as u32) << 0) | ((header[5] as u32) << 8) | - ((header[6] as u32) << 16) | - ((header[7] as u32) << 24); - let _xfl = header[8]; - let _os = header[9]; - - let extra = if flg & FEXTRA != 0 { - let xlen = try!(read_le_u16(&mut crc_reader)); - let mut extra = vec![0; xlen as usize]; - try!(crc_reader.read_exact(&mut extra)); - Some(extra) - } else { - None - }; - let filename = if flg & FNAME != 0 { - // wow this is slow - let mut b = Vec::new(); - for byte in crc_reader.by_ref().bytes() { - let byte = try!(byte); - if byte == 0 { - break; - } - b.push(byte); - } - Some(b) - } else { - None - }; - let comment = if flg & FCOMMENT != 0 { - // wow this is slow - let mut b = 
Vec::new(); - for byte in crc_reader.by_ref().bytes() { - let byte = try!(byte); - if byte == 0 { - break; - } - b.push(byte); - } - Some(b) - } else { - None - }; - - if flg & FHCRC != 0 { - let calced_crc = crc_reader.crc().sum() as u16; - let stored_crc = try!(read_le_u16(&mut crc_reader)); - if calced_crc != stored_crc { - return Err(corrupt()); - } - } - - let flate = deflate::DecoderReaderBuf::new(crc_reader.into_inner()); - return Ok(DecoderReaderBuf { - inner: CrcReader::new(flate), - header: Header { - extra: extra, - filename: filename, - comment: comment, - mtime: mtime, - }, - finished: false, - }); - - fn bad_header() -> io::Error { - io::Error::new(io::ErrorKind::InvalidInput, "invalid gzip header") - } - - fn read_le_u16(r: &mut R) -> io::Result { - let mut b = [0; 2]; - try!(r.read_exact(&mut b)); - Ok((b[0] as u16) | ((b[1] as u16) << 8)) - } - } - - /// Returns the header associated with this stream. - pub fn header(&self) -> &Header { - &self.header - } - - fn finish(&mut self) -> io::Result<()> { - if self.finished { - return Ok(()); - } - let ref mut buf = [0u8; 8]; - { - let mut len = 0; - - while len < buf.len() { - match try!(self.inner.inner().get_mut().read(&mut buf[len..])) { - 0 => return Err(corrupt()), - n => len += n, - } - } - } - - let crc = ((buf[0] as u32) << 0) | ((buf[1] as u32) << 8) | - ((buf[2] as u32) << 16) | - ((buf[3] as u32) << 24); - let amt = ((buf[4] as u32) << 0) | ((buf[5] as u32) << 8) | - ((buf[6] as u32) << 16) | - ((buf[7] as u32) << 24); - if crc != self.inner.crc().sum() as u32 { - return Err(corrupt()); - } - if amt != self.inner.crc().amt_as_u32() { - return Err(corrupt()); - } - self.finished = true; - Ok(()) - } -} - -impl Read for DecoderReaderBuf { - fn read(&mut self, into: &mut [u8]) -> io::Result { - match try!(self.inner.read(into)) { - 0 => { - try!(self.finish()); - Ok(0) - } - n => Ok(n), - } - } -} - -impl Header { - /// Returns the `filename` field of this gzip stream's header, if present. 
- pub fn filename(&self) -> Option<&[u8]> { - self.filename.as_ref().map(|s| &s[..]) - } - - /// Returns the `extra` field of this gzip stream's header, if present. - pub fn extra(&self) -> Option<&[u8]> { - self.extra.as_ref().map(|s| &s[..]) - } - - /// Returns the `comment` field of this gzip stream's header, if present. - pub fn comment(&self) -> Option<&[u8]> { - self.comment.as_ref().map(|s| &s[..]) - } - - /// Returns the `mtime` field of this gzip stream's header, if present. - pub fn mtime(&self) -> u32 { - self.mtime - } -} - -fn corrupt() -> io::Error { - io::Error::new(io::ErrorKind::InvalidInput, - "corrupt gzip stream does not have a matching checksum") -} - -#[cfg(test)] -mod tests { - use std::io::prelude::*; - - use super::{EncoderWriter, EncoderReader, DecoderReader, Builder}; - use Compression::Default; - use rand::{thread_rng, Rng}; - - #[test] - fn roundtrip() { - let mut e = EncoderWriter::new(Vec::new(), Default); - e.write_all(b"foo bar baz").unwrap(); - let inner = e.finish().unwrap(); - let mut d = DecoderReader::new(&inner[..]).unwrap(); - let mut s = String::new(); - d.read_to_string(&mut s).unwrap(); - assert_eq!(s, "foo bar baz"); - } - - #[test] - fn roundtrip_zero() { - let e = EncoderWriter::new(Vec::new(), Default); - let inner = e.finish().unwrap(); - let mut d = DecoderReader::new(&inner[..]).unwrap(); - let mut s = String::new(); - d.read_to_string(&mut s).unwrap(); - assert_eq!(s, ""); - } - - #[test] - fn roundtrip_big() { - let mut real = Vec::new(); - let mut w = EncoderWriter::new(Vec::new(), Default); - let v = thread_rng().gen_iter::().take(1024).collect::>(); - for _ in 0..200 { - let to_write = &v[..thread_rng().gen_range(0, v.len())]; - real.extend(to_write.iter().map(|x| *x)); - w.write_all(to_write).unwrap(); - } - let result = w.finish().unwrap(); - let mut r = DecoderReader::new(&result[..]).unwrap(); - let mut v = Vec::new(); - r.read_to_end(&mut v).unwrap(); - assert!(v == real); - } - - #[test] - fn 
roundtrip_big2() { - let v = thread_rng() - .gen_iter::() - .take(1024 * 1024) - .collect::>(); - let mut r = DecoderReader::new(EncoderReader::new(&v[..], Default)) - .unwrap(); - let mut res = Vec::new(); - r.read_to_end(&mut res).unwrap(); - assert!(res == v); - } - - #[test] - fn fields() { - let r = vec![0, 2, 4, 6]; - let e = Builder::new() - .filename(b"foo.rs") - .comment(b"bar") - .extra(vec![0, 1, 2, 3]) - .read(&r[..], Default); - let mut d = DecoderReader::new(e).unwrap(); - assert_eq!(d.header().filename(), Some(&b"foo.rs"[..])); - assert_eq!(d.header().comment(), Some(&b"bar"[..])); - assert_eq!(d.header().extra(), Some(&b"\x00\x01\x02\x03"[..])); - let mut res = Vec::new(); - d.read_to_end(&mut res).unwrap(); - assert_eq!(res, vec![0, 2, 4, 6]); - - } - - #[test] - fn keep_reading_after_end() { - let mut e = EncoderWriter::new(Vec::new(), Default); - e.write_all(b"foo bar baz").unwrap(); - let inner = e.finish().unwrap(); - let mut d = DecoderReader::new(&inner[..]).unwrap(); - let mut s = String::new(); - d.read_to_string(&mut s).unwrap(); - assert_eq!(s, "foo bar baz"); - d.read_to_string(&mut s).unwrap(); - assert_eq!(s, "foo bar baz"); - } -} diff -Nru cargo-0.17.0/vendor/flate2-0.2.14/src/lib.rs cargo-0.19.0/vendor/flate2-0.2.14/src/lib.rs --- cargo-0.17.0/vendor/flate2-0.2.14/src/lib.rs 2017-03-24 16:59:54.000000000 +0000 +++ cargo-0.19.0/vendor/flate2-0.2.14/src/lib.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,211 +0,0 @@ -//! A DEFLATE-based stream compression/decompression library -//! -//! This library is meant to supplement/replace the standard distributon's -//! libflate library by providing a streaming encoder/decoder rather than purely -//! in in-memory encoder/decoder. -//! -//! Like with libflate, flate2 is based on [`miniz.c`][1] -//! -//! [1]: https://code.google.com/p/miniz/ -//! -//! # Organization -//! -//! This crate consists mainly of two modules, `reader` and `writer`. Each -//! 
module contains a number of types used to encode and decode various streams -//! of data. All types in the `writer` module work on instances of `Writer`, -//! whereas all types in the `reader` module work on instances of `Reader`. -//! -//! Other various types are provided at the top-level of the crate for -//! mangement and dealing with encoders/decoders. -//! -//! # Helper traits -//! -//! There are two helper traits, provided, `FlateReader` and `FlateWriter`. -//! These provide convenience methods for creating a decoder/encoder out of an -//! already existing stream to chain construction. - -#![doc(html_root_url = "http://alexcrichton.com/flate2-rs")] -#![deny(missing_docs)] -#![allow(trivial_numeric_casts)] -#![cfg_attr(test, deny(warnings))] - -extern crate libc; -#[cfg(test)] -extern crate rand; - -use std::io::prelude::*; -use std::io; - -pub use gz::Builder as GzBuilder; -pub use gz::Header as GzHeader; -pub use mem::{Compress, Decompress, DataError, Status, Flush}; - -mod bufreader; -mod crc; -mod deflate; -mod ffi; -mod gz; -mod zio; -mod mem; -mod zlib; - -/// Types which operate over `Read` streams, both encoders and decoders for -/// various formats. -pub mod read { - pub use deflate::EncoderReader as DeflateEncoder; - pub use deflate::DecoderReader as DeflateDecoder; - pub use zlib::EncoderReader as ZlibEncoder; - pub use zlib::DecoderReader as ZlibDecoder; - pub use gz::EncoderReader as GzEncoder; - pub use gz::DecoderReader as GzDecoder; -} - -/// Types which operate over `Write` streams, both encoders and decoders for -/// various formats. -pub mod write { - pub use deflate::EncoderWriter as DeflateEncoder; - pub use deflate::DecoderWriter as DeflateDecoder; - pub use zlib::EncoderWriter as ZlibEncoder; - pub use zlib::DecoderWriter as ZlibDecoder; - pub use gz::EncoderWriter as GzEncoder; -} - -/// Types which operate over `BufRead` streams, both encoders and decoders for -/// various formats. 
-pub mod bufread { - pub use deflate::EncoderReaderBuf as DeflateEncoder; - pub use deflate::DecoderReaderBuf as DeflateDecoder; - pub use zlib::EncoderReaderBuf as ZlibEncoder; - pub use zlib::DecoderReaderBuf as ZlibDecoder; - pub use gz::EncoderReaderBuf as GzEncoder; - pub use gz::DecoderReaderBuf as GzDecoder; -} - -fn _assert_send_sync() { - fn _assert_send_sync() {} - - _assert_send_sync::>(); - _assert_send_sync::>(); - _assert_send_sync::>(); - _assert_send_sync::>(); - _assert_send_sync::>(); - _assert_send_sync::>(); - _assert_send_sync::>>(); - _assert_send_sync::>>(); - _assert_send_sync::>>(); - _assert_send_sync::>>(); - _assert_send_sync::>>(); -} - -/// When compressing data, the compression level can be specified by a value in -/// this enum. -#[derive(Copy, Clone)] -pub enum Compression { - /// No compression is to be performed, this may actually inflate data - /// slightly when encoding. - None = 0, - /// Optimize for the best speed of encoding. - Fast = 1, - /// Optimize for the size of data being encoded. - Best = 9, - /// Choose the default compression, a balance between speed and size. - Default = 6, -} - -/// A helper trait to create encoder/decoders with method syntax. -pub trait FlateReadExt: Read + Sized { - /// Consume this reader to create a compression stream at the specified - /// compression level. - fn gz_encode(self, lvl: Compression) -> read::GzEncoder { - read::GzEncoder::new(self, lvl) - } - - /// Consume this reader to create a decompression stream of this stream. - fn gz_decode(self) -> io::Result> { - read::GzDecoder::new(self) - } - - /// Consume this reader to create a compression stream at the specified - /// compression level. - fn zlib_encode(self, lvl: Compression) -> read::ZlibEncoder { - read::ZlibEncoder::new(self, lvl) - } - - /// Consume this reader to create a decompression stream of this stream. 
- fn zlib_decode(self) -> read::ZlibDecoder { - read::ZlibDecoder::new(self) - } - - /// Consume this reader to create a compression stream at the specified - /// compression level. - fn deflate_encode(self, lvl: Compression) -> read::DeflateEncoder { - read::DeflateEncoder::new(self, lvl) - } - - /// Consume this reader to create a decompression stream of this stream. - fn deflate_decode(self) -> read::DeflateDecoder { - read::DeflateDecoder::new(self) - } -} - -/// A helper trait to create encoder/decoders with method syntax. -pub trait FlateWriteExt: Write + Sized { - /// Consume this writer to create a compression stream at the specified - /// compression level. - fn gz_encode(self, lvl: Compression) -> write::GzEncoder { - write::GzEncoder::new(self, lvl) - } - - // TODO: coming soon to a theater near you! - // /// Consume this writer to create a decompression stream of this stream. - // fn gz_decode(self) -> IoResult> { - // write::GzDecoder::new(self) - // } - - /// Consume this writer to create a compression stream at the specified - /// compression level. - fn zlib_encode(self, lvl: Compression) -> write::ZlibEncoder { - write::ZlibEncoder::new(self, lvl) - } - - /// Consume this writer to create a decompression stream of this stream. - fn zlib_decode(self) -> write::ZlibDecoder { - write::ZlibDecoder::new(self) - } - - /// Consume this writer to create a compression stream at the specified - /// compression level. - fn deflate_encode(self, lvl: Compression) -> write::DeflateEncoder { - write::DeflateEncoder::new(self, lvl) - } - - /// Consume this writer to create a decompression stream of this stream. 
- fn deflate_decode(self) -> write::DeflateDecoder { - write::DeflateDecoder::new(self) - } -} - -impl FlateReadExt for T {} -impl FlateWriteExt for T {} - -#[cfg(test)] -mod test { - use std::io::prelude::*; - use {FlateReadExt, Compression}; - - #[test] - fn crazy() { - let rdr = &mut b"foobar"; - let mut res = Vec::new(); - rdr.gz_encode(Compression::Default) - .deflate_encode(Compression::Default) - .zlib_encode(Compression::Default) - .zlib_decode() - .deflate_decode() - .gz_decode() - .unwrap() - .read_to_end(&mut res) - .unwrap(); - assert_eq!(res, b"foobar"); - } -} diff -Nru cargo-0.17.0/vendor/flate2-0.2.14/src/mem.rs cargo-0.19.0/vendor/flate2-0.2.14/src/mem.rs --- cargo-0.17.0/vendor/flate2-0.2.14/src/mem.rs 2017-03-24 16:59:54.000000000 +0000 +++ cargo-0.19.0/vendor/flate2-0.2.14/src/mem.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,369 +0,0 @@ -use std::error::Error; -use std::fmt; -use std::marker; -use std::mem; -use std::slice; - -use libc::{c_int, c_uint}; - -use Compression; -use ffi; - -/// Raw in-memory compression stream for blocks of data. -/// -/// This type is the building block for the I/O streams in the rest of this -/// crate. It requires more management than the `Read`/`Write` API but is -/// maximally flexible in terms of accepting input from any source and being -/// able to produce output to any memory location. -/// -/// It is recommended to use the I/O stream adaptors over this type as they're -/// easier to use. -pub struct Compress { - inner: Stream, -} - -/// Raw in-memory decompression stream for blocks of data. -/// -/// This type is the building block for the I/O streams in the rest of this -/// crate. It requires more management than the `Read`/`Write` API but is -/// maximally flexible in terms of accepting input from any source and being -/// able to produce output to any memory location. -/// -/// It is recommended to use the I/O stream adaptors over this type as they're -/// easier to use. 
-pub struct Decompress { - inner: Stream, -} - -struct Stream { - raw: ffi::mz_stream, - _marker: marker::PhantomData, -} - -unsafe impl Send for Stream {} -unsafe impl Sync for Stream {} - -trait Direction { - unsafe fn destroy(stream: *mut ffi::mz_stream) -> c_int; -} - -enum DirCompress {} -enum DirDecompress {} - -/// Values which indicate the form of flushing to be used when compressing or -/// decompressing in-memory data. -pub enum Flush { - /// A typical parameter for passing to compression/decompression functions, - /// this indicates that the underlying stream to decide how much data to - /// accumulate before producing output in order to maximize compression. - None = ffi::MZ_NO_FLUSH as isize, - - /// All pending output is flushed to the output buffer and the output is - /// aligned on a byte boundary so that the decompressor can get all input - /// data available so far. - /// - /// Flushing may degrade compression for some compression algorithms and so - /// it should only be used when necessary. This will complete the current - /// deflate block and follow it with an empty stored block. - Sync = ffi::MZ_SYNC_FLUSH as isize, - - /// All pending output is flushed to the output buffer, but the output is - /// not aligned to a byte boundary. - /// - /// All of the input data so far will be available to the decompressor (as - /// with `Flush::Sync`. This completes the current deflate block and follows - /// it with an empty fixed codes block that is 10 bites long, and it assures - /// that enough bytes are output in order for the decompessor to finish the - /// block before the empty fixed code block. - Partial = ffi::MZ_PARTIAL_FLUSH as isize, - - /// A deflate block is completed and emitted, as for `Flush::Sync`, but the - /// output is not aligned on a byte boundary and up to seven vits of the - /// current block are held to be written as the next byte after the next - /// deflate block is completed. 
- /// - /// In this case the decompressor may not be provided enough bits at this - /// point in order to complete decompression of the data provided so far to - /// the compressor, it may need to wait for the next block to be emitted. - /// This is for advanced applications that need to control the emission of - /// deflate blocks. - Block = ffi::MZ_BLOCK as isize, - - /// All output is flushed as with `Flush::Sync` and the compression state is - /// reset so decompression can restart from this point if previous - /// compressed data has been damaged or if random access is desired. - /// - /// Using this option too often can seriously degrade compression. - Full = ffi::MZ_FULL_FLUSH as isize, - - /// Pending input is processed and pending output is flushed. - /// - /// The return value may indicate that the stream is not yet done and more - /// data has yet to be processed. - Finish = ffi::MZ_FINISH as isize, -} - -/// Error returned when a decompression object finds that the input stream of -/// bytes was not a valid input stream of bytes. -#[derive(Debug)] -pub struct DataError(()); - -/// Possible status results of compressing some data or successfully -/// decompressing a block of data. -pub enum Status { - /// Indicates success. - /// - /// Means that more input may be needed but isn't available - /// and/or there' smore output to be written but the output buffer is full. - Ok, - - /// Indicates that forward progress is not possible due to input or output - /// buffers being empty. - /// - /// For compression it means the input buffer needs some more data or the - /// output buffer needs to be freed up before trying again. - /// - /// For decompression this means that more input is needed to continue or - /// the output buffer isn't large enough to contain the result. The function - /// can be called again after fixing both. - BufError, - - /// Indicates that all input has been consumed and all output bytes have - /// been written. 
Decompression/compression should not be called again. - /// - /// For decompression with zlib streams the adler-32 of the decompressed - /// data has also been verified. - StreamEnd, -} - -impl Compress { - /// Creates a new object ready for compressing data that it's given. - /// - /// The `level` argument here indicates what level of compression is going - /// to be performed, and the `zlib_header` argument indicates whether the - /// output data should have a zlib header or not. - pub fn new(level: Compression, zlib_header: bool) -> Compress { - unsafe { - let mut state: ffi::mz_stream = mem::zeroed(); - let ret = ffi::mz_deflateInit2(&mut state, - level as c_int, - ffi::MZ_DEFLATED, - if zlib_header { - ffi::MZ_DEFAULT_WINDOW_BITS - } else { - -ffi::MZ_DEFAULT_WINDOW_BITS - }, - 9, - ffi::MZ_DEFAULT_STRATEGY); - debug_assert_eq!(ret, 0); - Compress { - inner: Stream { - raw: state, - _marker: marker::PhantomData, - }, - } - } - } - - /// Returns the total number of input bytes which have been processed by - /// this compression object. - pub fn total_in(&self) -> u64 { - self.inner.raw.total_in as u64 - } - - /// Returns the total number of output bytes which have been produced by - /// this compression object. - pub fn total_out(&self) -> u64 { - self.inner.raw.total_out as u64 - } - - /// Quickly resets this compressor without having to reallocate anything. - /// - /// This is equivalent to dropping this object and then creating a new one. - pub fn reset(&mut self) { - let rc = unsafe { ffi::mz_deflateReset(&mut self.inner.raw) }; - assert_eq!(rc, ffi::MZ_OK); - } - - /// Compresses the input data into the output, consuming only as much - /// input as needed and writing as much output as possible. - /// - /// The flush option can be any of the available flushing parameters. - /// - /// To learn how much data was consumed or how much output was produced, use - /// the `total_in` and `total_out` functions before/after this is called. 
- pub fn compress(&mut self, - input: &[u8], - output: &mut [u8], - flush: Flush) - -> Status { - self.inner.raw.next_in = input.as_ptr() as *mut _; - self.inner.raw.avail_in = input.len() as c_uint; - self.inner.raw.next_out = output.as_mut_ptr(); - self.inner.raw.avail_out = output.len() as c_uint; - unsafe { - match ffi::mz_deflate(&mut self.inner.raw, flush as c_int) { - ffi::MZ_OK => Status::Ok, - ffi::MZ_BUF_ERROR => Status::BufError, - ffi::MZ_STREAM_END => Status::StreamEnd, - c => panic!("unknown return code: {}", c), - } - } - } - - /// Compresses the input data into the extra space of the output, consuming - /// only as much input as needed and writing as much output as possible. - /// - /// This function has the same semantics as `compress`, except that the - /// length of `vec` is managed by this function. This will not reallocate - /// the vector provided or attempt to grow it, so space for the output must - /// be reserved in the output vector by the caller before calling this - /// function. - pub fn compress_vec(&mut self, - input: &[u8], - output: &mut Vec, - flush: Flush) - -> Status { - let cap = output.capacity(); - let len = output.len(); - - unsafe { - let before = self.total_out(); - let ret = { - let ptr = output.as_mut_ptr().offset(len as isize); - let out = slice::from_raw_parts_mut(ptr, cap - len); - self.compress(input, out, flush) - }; - output.set_len((self.total_out() - before) as usize + len); - return ret - } - } -} - -impl Decompress { - /// Creates a new object ready for decompressing data that it's given. - /// - /// The `zlib_header` argument indicates whether the input data is expected - /// to have a zlib header or not. 
- pub fn new(zlib_header: bool) -> Decompress { - unsafe { - let mut state: ffi::mz_stream = mem::zeroed(); - let ret = ffi::mz_inflateInit2(&mut state, - if zlib_header { - ffi::MZ_DEFAULT_WINDOW_BITS - } else { - -ffi::MZ_DEFAULT_WINDOW_BITS - }); - debug_assert_eq!(ret, 0); - Decompress { - inner: Stream { - raw: state, - _marker: marker::PhantomData, - }, - } - } - } - - /// Returns the total number of input bytes which have been processed by - /// this decompression object. - pub fn total_in(&self) -> u64 { - self.inner.raw.total_in as u64 - } - - /// Returns the total number of output bytes which have been produced by - /// this decompression object. - pub fn total_out(&self) -> u64 { - self.inner.raw.total_out as u64 - } - - /// Decompresses the input data into the output, consuming only as much - /// input as needed and writing as much output as possible. - /// - /// The flush option provided can either be `Flush::None`, `Flush::Sync`, - /// or `Flush::Finish`. If the first call passes `Flush::Finish` it is - /// assumed that the input and output buffers are both sized large enough to - /// decompress the entire stream in a single call. - /// - /// A flush value of `Flush::Finish` indicates that there are no more source - /// bytes available beside what's already in the input buffer, and the - /// output buffer is large enough to hold the rest of the decompressed data. - /// - /// To learn how much data was consumed or how much output was produced, use - /// the `total_in` and `total_out` functions before/after this is called. 
- pub fn decompress(&mut self, - input: &[u8], - output: &mut [u8], - flush: Flush) - -> Result { - self.inner.raw.next_in = input.as_ptr() as *mut u8; - self.inner.raw.avail_in = input.len() as c_uint; - self.inner.raw.next_out = output.as_mut_ptr(); - self.inner.raw.avail_out = output.len() as c_uint; - unsafe { - match ffi::mz_inflate(&mut self.inner.raw, flush as c_int) { - ffi::MZ_DATA_ERROR => Err(DataError(())), - ffi::MZ_OK => Ok(Status::Ok), - ffi::MZ_BUF_ERROR => Ok(Status::BufError), - ffi::MZ_STREAM_END => Ok(Status::StreamEnd), - c => panic!("unknown return code: {}", c), - } - } - } - - /// Decompresses the input data into the extra space in the output vector - /// specified by `output`. - /// - /// This function has the same semantics as `decompress`, except that the - /// length of `vec` is managed by this function. This will not reallocate - /// the vector provided or attempt to grow it, so space for the output must - /// be reserved in the output vector by the caller before calling this - /// function. 
- pub fn decompress_vec(&mut self, - input: &[u8], - output: &mut Vec, - flush: Flush) - -> Result { - let cap = output.capacity(); - let len = output.len(); - - unsafe { - let before = self.total_out(); - let ret = { - let ptr = output.as_mut_ptr().offset(len as isize); - let out = slice::from_raw_parts_mut(ptr, cap - len); - self.decompress(input, out, flush) - }; - output.set_len((self.total_out() - before) as usize + len); - return ret - } - } -} - -impl Error for DataError { - fn description(&self) -> &str { "deflate data error" } -} - -impl fmt::Display for DataError { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - self.description().fmt(f) - } -} - -impl Direction for DirCompress { - unsafe fn destroy(stream: *mut ffi::mz_stream) -> c_int { - ffi::mz_deflateEnd(stream) - } -} -impl Direction for DirDecompress { - unsafe fn destroy(stream: *mut ffi::mz_stream) -> c_int { - ffi::mz_inflateEnd(stream) - } -} - -impl Drop for Stream { - fn drop(&mut self) { - unsafe { - let _ = D::destroy(&mut self.raw); - } - } -} diff -Nru cargo-0.17.0/vendor/flate2-0.2.14/src/zio.rs cargo-0.19.0/vendor/flate2-0.2.14/src/zio.rs --- cargo-0.17.0/vendor/flate2-0.2.14/src/zio.rs 2017-03-24 16:59:54.000000000 +0000 +++ cargo-0.19.0/vendor/flate2-0.2.14/src/zio.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,186 +0,0 @@ -use std::io::prelude::*; -use std::io; -use std::mem; - -use {Decompress, Compress, Status, Flush, DataError}; - -pub struct Writer { - obj: Option, - pub data: D, - buf: Vec, -} - -pub trait Ops { - fn total_in(&self) -> u64; - fn total_out(&self) -> u64; - fn run(&mut self, input: &[u8], output: &mut [u8], flush: Flush) - -> Result; - fn run_vec(&mut self, input: &[u8], output: &mut Vec, flush: Flush) - -> Result; -} - -impl Ops for Compress { - fn total_in(&self) -> u64 { self.total_in() } - fn total_out(&self) -> u64 { self.total_out() } - fn run(&mut self, input: &[u8], output: &mut [u8], flush: Flush) - -> Result { - Ok(self.compress(input, output, 
flush)) - } - fn run_vec(&mut self, input: &[u8], output: &mut Vec, flush: Flush) - -> Result { - Ok(self.compress_vec(input, output, flush)) - } -} - -impl Ops for Decompress { - fn total_in(&self) -> u64 { self.total_in() } - fn total_out(&self) -> u64 { self.total_out() } - fn run(&mut self, input: &[u8], output: &mut [u8], flush: Flush) - -> Result { - self.decompress(input, output, flush) - } - fn run_vec(&mut self, input: &[u8], output: &mut Vec, flush: Flush) - -> Result { - self.decompress_vec(input, output, flush) - } -} - -pub fn read(obj: &mut R, data: &mut D, dst: &mut [u8]) -> io::Result - where R: BufRead, D: Ops -{ - loop { - let (read, consumed, ret, eof); - { - let input = try!(obj.fill_buf()); - eof = input.is_empty(); - let before_out = data.total_out(); - let before_in = data.total_in(); - let flush = if eof {Flush::Finish} else {Flush::None}; - ret = data.run(input, dst, flush); - read = (data.total_out() - before_out) as usize; - consumed = (data.total_in() - before_in) as usize; - } - obj.consume(consumed); - - match ret { - // If we haven't ready any data and we haven't hit EOF yet, - // then we need to keep asking for more data because if we - // return that 0 bytes of data have been read then it will - // be interpreted as EOF. - Ok(Status::Ok) | - Ok(Status::BufError) if read == 0 && !eof && dst.len() > 0 => { - continue - } - Ok(Status::Ok) | - Ok(Status::BufError) | - Ok(Status::StreamEnd) => return Ok(read), - - Err(..) 
=> return Err(io::Error::new(io::ErrorKind::InvalidInput, - "corrupt deflate stream")) - } - } -} - -impl Writer { - pub fn new(w: W, d: D) -> Writer { - Writer { - obj: Some(w), - data: d, - buf: Vec::with_capacity(32 * 1024), - } - } - - pub fn finish(&mut self) -> io::Result<()> { - loop { - try!(self.dump()); - - let before = self.data.total_out(); - self.data.run_vec(&[], &mut self.buf, Flush::Finish).unwrap(); - if before == self.data.total_out() { - return Ok(()) - } - } - } - - pub fn replace(&mut self, w: W) -> W { - self.buf.truncate(0); - mem::replace(&mut self.obj, Some(w)).unwrap() - } - - pub fn get_mut(&mut self) -> Option<&mut W> { - self.obj.as_mut() - } - - pub fn take_inner(&mut self) -> Option { - self.obj.take() - } - - pub fn into_inner(mut self) -> W { - self.take_inner().unwrap() - } - - fn dump(&mut self) -> io::Result<()> { - if self.buf.len() > 0 { - try!(self.obj.as_mut().unwrap().write_all(&self.buf)); - self.buf.truncate(0); - } - Ok(()) - } -} - -impl Write for Writer { - fn write(&mut self, buf: &[u8]) -> io::Result { - // miniz isn't guaranteed to actually write any of the buffer provided, - // it may be in a flushing mode where it's just giving us data before - // we're actually giving it any data. We don't want to spuriously return - // `Ok(0)` when possible as it will cause calls to write_all() to fail. - // As a result we execute this in a loop to ensure that we try our - // darndest to write the data. - loop { - try!(self.dump()); - - let before_in = self.data.total_in(); - let ret = self.data.run_vec(buf, &mut self.buf, Flush::None); - let written = (self.data.total_in() - before_in) as usize; - - if buf.len() > 0 && written == 0 && ret.is_ok() { - continue - } - return match ret { - Ok(Status::Ok) | - Ok(Status::BufError) | - Ok(Status::StreamEnd) => Ok(written), - - Err(..) 
=> Err(io::Error::new(io::ErrorKind::InvalidInput, - "corrupt deflate stream")) - } - } - } - - fn flush(&mut self) -> io::Result<()> { - // Unfortunately miniz doesn't actually tell us when we're done with - // pulling out all the data from the internal stream. To remedy this we - // have to continually ask the stream for more memory until it doesn't - // give us a chunk of memory the same size as our own internal buffer, - // at which point we assume it's reached the end. - loop { - try!(self.dump()); - - let before = self.data.total_out(); - self.data.run_vec(&[], &mut self.buf, Flush::Sync).unwrap(); - if before == self.data.total_out() { - break - } - } - - self.obj.as_mut().unwrap().flush() - } -} - -impl Drop for Writer { - fn drop(&mut self) { - if self.obj.is_some() { - let _ = self.finish(); - } - } -} diff -Nru cargo-0.17.0/vendor/flate2-0.2.14/src/zlib.rs cargo-0.19.0/vendor/flate2-0.2.14/src/zlib.rs --- cargo-0.17.0/vendor/flate2-0.2.14/src/zlib.rs 2017-03-24 16:59:54.000000000 +0000 +++ cargo-0.19.0/vendor/flate2-0.2.14/src/zlib.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,509 +0,0 @@ -//! ZLIB compression and decompression of streams - -use std::io::prelude::*; -use std::io; -use std::mem; - -use bufreader::BufReader; -use zio; -use {Compress, Decompress}; - -/// A ZLIB encoder, or compressor. -/// -/// This structure implements a `Write` interface and takes a stream of -/// uncompressed data, writing the compressed data to the wrapped writer. -pub struct EncoderWriter { - inner: zio::Writer, -} - -/// A ZLIB encoder, or compressor. -/// -/// This structure implements a `Read` interface and will read uncompressed -/// data from an underlying stream and emit a stream of compressed data. -pub struct EncoderReader { - inner: EncoderReaderBuf>, -} - -/// A ZLIB encoder, or compressor. -/// -/// This structure implements a `BufRead` interface and will read uncompressed -/// data from an underlying stream and emit a stream of compressed data. 
-pub struct EncoderReaderBuf { - obj: R, - data: Compress, -} - -/// A ZLIB decoder, or decompressor. -/// -/// This structure implements a `Read` interface and takes a stream of -/// compressed data as input, providing the decompressed data when read from. -pub struct DecoderReader { - inner: DecoderReaderBuf>, -} - -/// A ZLIB decoder, or decompressor. -/// -/// This structure implements a `BufRead` interface and takes a stream of -/// compressed data as input, providing the decompressed data when read from. -pub struct DecoderReaderBuf { - obj: R, - data: Decompress, -} - -/// A ZLIB decoder, or decompressor. -/// -/// This structure implements a `Write` and will emit a stream of decompressed -/// data when fed a stream of compressed data. -pub struct DecoderWriter { - inner: zio::Writer, -} - -impl EncoderWriter { - /// Creates a new encoder which will write compressed data to the stream - /// given at the given compression level. - /// - /// When this encoder is dropped or unwrapped the final pieces of data will - /// be flushed. - pub fn new(w: W, level: ::Compression) -> EncoderWriter { - EncoderWriter { - inner: zio::Writer::new(w, Compress::new(level, true)), - } - } - - /// Resets the state of this encoder entirely, swapping out the output - /// stream for another. - /// - /// This function will finish encoding the current stream into the current - /// output stream before swapping out the two output streams. If the stream - /// cannot be finished an error is returned. - /// - /// After the current stream has been finished, this will reset the internal - /// state of this encoder and replace the output stream with the one - /// provided, returning the previous output stream. Future data written to - /// this encoder will be the compressed into the stream `w` provided. 
- pub fn reset(&mut self, w: W) -> io::Result { - try!(self.inner.finish()); - self.inner.data.reset(); - Ok(self.inner.replace(w)) - } - - /// Consumes this encoder, flushing the output stream. - /// - /// This will flush the underlying data stream and then return the contained - /// writer if the flush succeeded. - pub fn finish(mut self) -> io::Result { - try!(self.inner.finish()); - Ok(self.inner.into_inner()) - } -} - -impl Write for EncoderWriter { - fn write(&mut self, buf: &[u8]) -> io::Result { - self.inner.write(buf) - } - - fn flush(&mut self) -> io::Result<()> { - self.inner.flush() - } -} - -impl EncoderReader { - /// Creates a new encoder which will read uncompressed data from the given - /// stream and emit the compressed stream. - pub fn new(r: R, level: ::Compression) -> EncoderReader { - EncoderReader { - inner: EncoderReaderBuf::new(BufReader::new(r), level), - } - } - - /// Resets the state of this encoder entirely, swapping out the input - /// stream for another. - /// - /// This function will reset the internal state of this encoder and replace - /// the input stream with the one provided, returning the previous input - /// stream. Future data read from this encoder will be the compressed - /// version of `r`'s data. - pub fn reset(&mut self, r: R) -> R { - self.inner.data.reset(); - self.inner.obj.reset(r) - } - - /// Acquires a reference to the underlying stream - pub fn get_ref(&self) -> &R { - self.inner.get_ref().get_ref() - } - - /// Acquires a mutable reference to the underlying stream - /// - /// Note that mutation of the stream may result in surprising results if - /// this encoder is continued to be used. - pub fn get_mut(&mut self) -> &mut R { - self.inner.get_mut().get_mut() - } - - /// Consumes this encoder, returning the underlying reader. 
- pub fn into_inner(self) -> R { - self.inner.into_inner().into_inner() - } -} - -impl Read for EncoderReader { - fn read(&mut self, buf: &mut [u8]) -> io::Result { - self.inner.read(buf) - } -} - -impl EncoderReaderBuf { - /// Creates a new encoder which will read uncompressed data from the given - /// stream and emit the compressed stream. - pub fn new(r: R, level: ::Compression) -> EncoderReaderBuf { - EncoderReaderBuf { - obj: r, - data: Compress::new(level, true), - } - } - - /// Resets the state of this encoder entirely, swapping out the input - /// stream for another. - /// - /// This function will reset the internal state of this encoder and replace - /// the input stream with the one provided, returning the previous input - /// stream. Future data read from this encoder will be the compressed - /// version of `r`'s data. - pub fn reset(&mut self, r: R) -> R { - self.data.reset(); - mem::replace(&mut self.obj, r) - } - - /// Acquires a reference to the underlying stream - pub fn get_ref(&self) -> &R { - &self.obj - } - - /// Acquires a mutable reference to the underlying stream - /// - /// Note that mutation of the stream may result in surprising results if - /// this encoder is continued to be used. - pub fn get_mut(&mut self) -> &mut R { - &mut self.obj - } - - /// Consumes this encoder, returning the underlying reader. - pub fn into_inner(self) -> R { - self.obj - } -} - -impl Read for EncoderReaderBuf { - fn read(&mut self, buf: &mut [u8]) -> io::Result { - zio::read(&mut self.obj, &mut self.data, buf) - } -} - -impl DecoderReader { - /// Creates a new decoder which will decompress data read from the given - /// stream. - pub fn new(r: R) -> DecoderReader { - DecoderReader::new_with_buf(r, vec![0; 32 * 1024]) - } - - /// Same as `new`, but the intermediate buffer for data is specified. - /// - /// Note that the specified buffer will only be used up to its current - /// length. The buffer's capacity will also not grow over time. 
- pub fn new_with_buf(r: R, buf: Vec) -> DecoderReader { - DecoderReader { - inner: DecoderReaderBuf::new(BufReader::with_buf(buf, r)), - } - } - - /// Resets the state of this decoder entirely, swapping out the input - /// stream for another. - /// - /// This will reset the internal state of this decoder and replace the - /// input stream with the one provided, returning the previous input - /// stream. Future data read from this decoder will be the decompressed - /// version of `r`'s data. - pub fn reset(&mut self, r: R) -> R { - self.inner.data = Decompress::new(true); - self.inner.obj.reset(r) - } - - /// Acquires a reference to the underlying stream - pub fn get_ref(&self) -> &R { - self.inner.get_ref().get_ref() - } - - /// Acquires a mutable reference to the underlying stream - /// - /// Note that mutation of the stream may result in surprising results if - /// this encoder is continued to be used. - pub fn get_mut(&mut self) -> &mut R { - self.inner.get_mut().get_mut() - } - - /// Consumes this decoder, returning the underlying reader. - pub fn into_inner(self) -> R { - self.inner.into_inner().into_inner() - } - - /// Returns the number of bytes that the decompressor has consumed. - /// - /// Note that this will likely be smaller than what the decompressor - /// actually read from the underlying stream due to buffering. - pub fn total_in(&self) -> u64 { - self.inner.total_in() - } - - /// Returns the number of bytes that the decompressor has produced. - pub fn total_out(&self) -> u64 { - self.inner.total_out() - } -} - -impl Read for DecoderReader { - fn read(&mut self, into: &mut [u8]) -> io::Result { - self.inner.read(into) - } -} - -impl DecoderReaderBuf { - /// Creates a new decoder which will decompress data read from the given - /// stream. - pub fn new(r: R) -> DecoderReaderBuf { - DecoderReaderBuf { - obj: r, - data: Decompress::new(true), - } - } - - /// Resets the state of this decoder entirely, swapping out the input - /// stream for another. 
- /// - /// This will reset the internal state of this decoder and replace the - /// input stream with the one provided, returning the previous input - /// stream. Future data read from this decoder will be the decompressed - /// version of `r`'s data. - pub fn reset(&mut self, r: R) -> R { - self.data = Decompress::new(true); - mem::replace(&mut self.obj, r) - } - - /// Acquires a reference to the underlying stream - pub fn get_ref(&self) -> &R { - &self.obj - } - - /// Acquires a mutable reference to the underlying stream - /// - /// Note that mutation of the stream may result in surprising results if - /// this encoder is continued to be used. - pub fn get_mut(&mut self) -> &mut R { - &mut self.obj - } - - /// Consumes this decoder, returning the underlying reader. - pub fn into_inner(self) -> R { - self.obj - } - - /// Returns the number of bytes that the decompressor has consumed. - /// - /// Note that this will likely be smaller than what the decompressor - /// actually read from the underlying stream due to buffering. - pub fn total_in(&self) -> u64 { - self.data.total_in() - } - - /// Returns the number of bytes that the decompressor has produced. - pub fn total_out(&self) -> u64 { - self.data.total_out() - } -} - -impl Read for DecoderReaderBuf { - fn read(&mut self, into: &mut [u8]) -> io::Result { - zio::read(&mut self.obj, &mut self.data, into) - } -} - -impl DecoderWriter { - /// Creates a new decoder which will write uncompressed data to the stream. - /// - /// When this decoder is dropped or unwrapped the final pieces of data will - /// be flushed. - pub fn new(w: W) -> DecoderWriter { - DecoderWriter { - inner: zio::Writer::new(w, Decompress::new(true)), - } - } - - /// Resets the state of this decoder entirely, swapping out the output - /// stream for another. - /// - /// This will reset the internal state of this decoder and replace the - /// output stream with the one provided, returning the previous output - /// stream. 
Future data written to this decoder will be decompressed into - /// the output stream `w`. - pub fn reset(&mut self, w: W) -> io::Result { - try!(self.inner.finish()); - self.inner.data = Decompress::new(true); - Ok(self.inner.replace(w)) - } - - /// Consumes this encoder, flushing the output stream. - /// - /// This will flush the underlying data stream and then return the contained - /// writer if the flush succeeded. - pub fn finish(mut self) -> io::Result { - try!(self.inner.finish()); - Ok(self.inner.into_inner()) - } - - /// Returns the number of bytes that the decompressor has consumed for - /// decompression. - /// - /// Note that this will likely be smaller than the number of bytes - /// successfully written to this stream due to internal buffering. - pub fn total_in(&self) -> u64 { - self.inner.data.total_in() - } - - /// Returns the number of bytes that the decompressor has written to its - /// output stream. - pub fn total_out(&self) -> u64 { - self.inner.data.total_out() - } -} - -impl Write for DecoderWriter { - fn write(&mut self, buf: &[u8]) -> io::Result { - self.inner.write(buf) - } - - fn flush(&mut self) -> io::Result<()> { - self.inner.flush() - } -} - -#[cfg(test)] -mod tests { - use std::io::prelude::*; - - use rand::{thread_rng, Rng}; - - use zlib::{EncoderWriter, EncoderReader, DecoderReader, DecoderWriter}; - use Compression::Default; - - #[test] - fn roundtrip() { - let mut real = Vec::new(); - let mut w = EncoderWriter::new(Vec::new(), Default); - let v = thread_rng().gen_iter::().take(1024).collect::>(); - for _ in 0..200 { - let to_write = &v[..thread_rng().gen_range(0, v.len())]; - real.extend(to_write.iter().map(|x| *x)); - w.write_all(to_write).unwrap(); - } - let result = w.finish().unwrap(); - let mut r = DecoderReader::new(&result[..]); - let mut ret = Vec::new(); - r.read_to_end(&mut ret).unwrap(); - assert!(ret == real); - } - - #[test] - fn drop_writes() { - let mut data = Vec::new(); - EncoderWriter::new(&mut data, 
Default).write_all(b"foo").unwrap(); - let mut r = DecoderReader::new(&data[..]); - let mut ret = Vec::new(); - r.read_to_end(&mut ret).unwrap(); - assert!(ret == b"foo"); - } - - #[test] - fn total_in() { - let mut real = Vec::new(); - let mut w = EncoderWriter::new(Vec::new(), Default); - let v = thread_rng().gen_iter::().take(1024).collect::>(); - for _ in 0..200 { - let to_write = &v[..thread_rng().gen_range(0, v.len())]; - real.extend(to_write.iter().map(|x| *x)); - w.write_all(to_write).unwrap(); - } - let mut result = w.finish().unwrap(); - - let result_len = result.len(); - - for _ in 0..200 { - result.extend(v.iter().map(|x| *x)); - } - - let mut r = DecoderReader::new(&result[..]); - let mut ret = Vec::new(); - r.read_to_end(&mut ret).unwrap(); - assert!(ret == real); - assert_eq!(r.total_in(), result_len as u64); - } - - #[test] - fn roundtrip2() { - let v = thread_rng() - .gen_iter::() - .take(1024 * 1024) - .collect::>(); - let mut r = DecoderReader::new(EncoderReader::new(&v[..], Default)); - let mut ret = Vec::new(); - r.read_to_end(&mut ret).unwrap(); - assert_eq!(ret, v); - } - - #[test] - fn roundtrip3() { - let v = thread_rng() - .gen_iter::() - .take(1024 * 1024) - .collect::>(); - let mut w = EncoderWriter::new(DecoderWriter::new(Vec::new()), Default); - w.write_all(&v).unwrap(); - let w = w.finish().unwrap().finish().unwrap(); - assert!(w == v); - } - - #[test] - fn reset_decoder() { - let v = thread_rng() - .gen_iter::() - .take(1024 * 1024) - .collect::>(); - let mut w = EncoderWriter::new(Vec::new(), Default); - w.write_all(&v).unwrap(); - let data = w.finish().unwrap(); - - { - let (mut a, mut b, mut c) = (Vec::new(), Vec::new(), Vec::new()); - let mut r = DecoderReader::new(&data[..]); - r.read_to_end(&mut a).unwrap(); - r.reset(&data); - r.read_to_end(&mut b).unwrap(); - - let mut r = DecoderReader::new(&data[..]); - r.read_to_end(&mut c).unwrap(); - assert!(a == b && b == c && c == v); - } - - { - let mut w = 
DecoderWriter::new(Vec::new()); - w.write_all(&data).unwrap(); - let a = w.reset(Vec::new()).unwrap(); - w.write_all(&data).unwrap(); - let b = w.finish().unwrap(); - - let mut w = DecoderWriter::new(Vec::new()); - w.write_all(&data).unwrap(); - let c = w.finish().unwrap(); - assert!(a == b && b == c && c == v); - } - } -} Binary files /tmp/tmplxiqkI/U7kx02RAH_/cargo-0.17.0/vendor/flate2-0.2.14/tests/corrupt-file.gz and /tmp/tmplxiqkI/L4nBRNMLnv/cargo-0.19.0/vendor/flate2-0.2.14/tests/corrupt-file.gz differ Binary files /tmp/tmplxiqkI/U7kx02RAH_/cargo-0.17.0/vendor/flate2-0.2.14/tests/good-file.gz and /tmp/tmplxiqkI/L4nBRNMLnv/cargo-0.19.0/vendor/flate2-0.2.14/tests/good-file.gz differ diff -Nru cargo-0.17.0/vendor/flate2-0.2.14/tests/good-file.txt cargo-0.19.0/vendor/flate2-0.2.14/tests/good-file.txt --- cargo-0.17.0/vendor/flate2-0.2.14/tests/good-file.txt 2017-03-24 16:59:54.000000000 +0000 +++ cargo-0.19.0/vendor/flate2-0.2.14/tests/good-file.txt 1970-01-01 00:00:00.000000000 +0000 @@ -1,733 +0,0 @@ -## ## -timestep simulated EIR patent hosts -0 0.136402 16855 -1 0.146872 18564 -2 0.150157 20334 -3 0.146358 22159 -4 0.136315 23655 -5 0.122354 24848 -6 0.104753 25887 -7 0.084439 26770 -8 0.06417 27238 -9 0.0450397 27349 -10 0.0295473 27274 -11 0.0184662 26909 -12 0.0110032 26324 -13 0.00634348 25513 -14 0.0036144 24469 -15 0.00208133 23383 -16 0.00122468 22345 -17 0.000752514 21342 -18 0.000545333 20416 -19 0.000546139 19657 -20 0.00054572 18806 -21 0.000545757 18015 -22 0.000545898 17349 -23 0.000546719 16594 -24 0.000547353 15955 -25 0.000547944 15374 -26 0.000547606 14765 -27 0.000594773 14212 -28 0.000969163 13677 -29 0.00168295 13180 -30 0.003059 12760 -31 0.00571599 12313 -32 0.0107918 11896 -33 0.0201943 11512 -34 0.0368013 11340 -35 0.0640629 11323 -36 0.104447 11769 -37 0.157207 12728 -38 0.216682 14261 -39 0.271159 16491 -40 0.303552 19274 -41 0.303678 22157 -42 0.271945 24875 -43 0.215445 27027 -44 0.154503 28690 -45 0.100717 30046 -46 0.0600343 30602 
-47 0.0328576 30709 -48 0.016964 30315 -49 0.00841526 29310 -50 0.0040958 28058 -51 0.0019953 26662 -52 0.000986531 25259 -53 0.000545786 24049 -54 0.000546405 22966 -55 0.000546036 21933 -56 0.00054427 20953 -57 0.000542769 20057 -58 0.000541566 19304 -59 0.000541822 18477 -60 0.000541643 17695 -61 0.000541989 17002 -62 0.000769298 16391 -63 0.00150811 15805 -64 0.00295097 15172 -65 0.00566197 14690 -66 0.0105243 14206 -67 0.0186965 13791 -68 0.0313363 13470 -69 0.0490605 13377 -70 0.0711679 13631 -71 0.0953625 14209 -72 0.118026 15277 -73 0.134612 16760 -74 0.144311 18339 -75 0.146328 20124 -76 0.142936 21803 -77 0.134029 23435 -78 0.120562 24854 -79 0.103157 25880 -80 0.0834054 26597 -81 0.0632474 27226 -82 0.0447785 27294 -83 0.0295654 27169 -84 0.0184081 26803 -85 0.0109489 26265 -86 0.00631234 25375 -87 0.00359978 24306 -88 0.00206967 23260 -89 0.00122197 22225 -90 0.000751031 21277 -91 0.000544507 20295 -92 0.000543897 19417 -93 0.000543483 18623 -94 0.000542926 17837 -95 0.000542685 17070 -96 0.000542387 16424 -97 0.000541194 15838 -98 0.000540427 15177 -99 0.000540774 14608 -100 0.000588312 14066 -101 0.000959183 13499 -102 0.00166774 12979 -103 0.00303278 12545 -104 0.00567457 12067 -105 0.0107272 11712 -106 0.0200606 11368 -107 0.0364637 11207 -108 0.063339 11238 -109 0.103717 11660 -110 0.156884 12621 -111 0.217072 14151 -112 0.272311 16358 -113 0.305046 19005 -114 0.304927 21926 -115 0.272427 24662 -116 0.216478 27080 -117 0.155168 29064 -118 0.10079 30370 -119 0.0599659 30992 -120 0.0331287 30975 -121 0.017235 30317 -122 0.00860221 29455 -123 0.00419286 28172 -124 0.00203361 26809 -125 0.000998847 25476 -126 0.000551418 24230 -127 0.000551119 23106 -128 0.000552786 22147 -129 0.000553814 21183 -130 0.000553743 20280 -131 0.000554428 19423 -132 0.000555022 18598 -133 0.000555921 17864 -134 0.000556687 17187 -135 0.000789996 16527 -136 0.00154597 15870 -137 0.00302776 15226 -138 0.00581484 14685 -139 0.010812 14234 -140 0.0191832 13818 -141 0.0321572 
13571 -142 0.050328 13538 -143 0.072817 13812 -144 0.0974321 14368 -145 0.120225 15436 -146 0.137418 16988 -147 0.147086 18775 -148 0.149165 20563 -149 0.144943 22223 -150 0.136631 23741 -151 0.123355 24920 -152 0.105401 25779 -153 0.0851918 26781 -154 0.0641702 27265 -155 0.0450746 27505 -156 0.0294136 27416 -157 0.0183811 27028 -158 0.0109285 26260 -159 0.00634296 25451 -160 0.00364513 24472 -161 0.0021051 23427 -162 0.00123693 22403 -163 0.000759531 21393 -164 0.000551727 20485 -165 0.000552256 19660 -166 0.000552303 18862 -167 0.000550927 18094 -168 0.000551098 17378 -169 0.000551093 16691 -170 0.000551885 16050 -171 0.000552282 15420 -172 0.000552591 14878 -173 0.00060109 14357 -174 0.000980446 13768 -175 0.00170301 13241 -176 0.003096 12745 -177 0.00579971 12294 -178 0.010976 11879 -179 0.0205422 11636 -180 0.0374515 11431 -181 0.0649916 11517 -182 0.106008 11966 -183 0.159983 12918 -184 0.221127 14484 -185 0.276503 16696 -186 0.310316 19518 -187 0.311205 22301 -188 0.276769 25047 -189 0.220506 27360 -190 0.159123 29133 -191 0.103761 30440 -192 0.0613797 31087 -193 0.033583 31037 -194 0.0173275 30555 -195 0.00861968 29617 -196 0.00419503 28292 -197 0.00203304 26944 -198 0.00100126 25569 -199 0.000553511 24349 -200 0.000554687 23257 -201 0.00055586 22204 -202 0.000555419 21176 -203 0.000556032 20316 -204 0.000555974 19509 -205 0.000556859 18746 -206 0.000556996 17978 -207 0.000557102 17288 -208 0.000790187 16672 -209 0.00154711 16057 -210 0.00303521 15449 -211 0.00584201 14915 -212 0.0108854 14397 -213 0.0193386 14010 -214 0.0324346 13730 -215 0.0507192 13674 -216 0.0736661 13874 -217 0.0987887 14515 -218 0.122411 15693 -219 0.139964 17265 -220 0.149125 18894 -221 0.151434 20662 -222 0.148067 22442 -223 0.138894 24116 -224 0.125436 25367 -225 0.107664 26360 -226 0.0865709 27044 -227 0.0655588 27428 -228 0.0459664 27714 -229 0.0301384 27687 -230 0.0186481 27262 -231 0.01103 26677 -232 0.00636957 25722 -233 0.00366188 24662 -234 0.00212213 23575 -235 0.00125358 
22520 -236 0.000768665 21480 -237 0.000556393 20563 -238 0.000555892 19706 -239 0.00055534 18914 -240 0.000555027 18165 -241 0.000555062 17432 -242 0.000553766 16733 -243 0.000552984 16070 -244 0.000553634 15396 -245 0.000554286 14867 -246 0.000603759 14362 -247 0.000982974 13867 -248 0.00170532 13379 -249 0.00310471 12907 -250 0.00582577 12446 -251 0.0110122 12018 -252 0.0206284 11730 -253 0.0375835 11546 -254 0.0652192 11605 -255 0.10646 11981 -256 0.160858 12949 -257 0.223122 14478 -258 0.279678 16810 -259 0.312171 19452 -260 0.311778 22391 -261 0.276966 25204 -262 0.22251 27379 -263 0.159246 29248 -264 0.104109 30532 -265 0.0617903 30995 -266 0.0338421 31042 -267 0.0174647 30620 -268 0.00867821 29589 -269 0.00419968 28293 -270 0.00203244 26916 -271 0.00100204 25464 -272 0.000555586 24219 -273 0.000555599 23207 -274 0.00055582 22187 -275 0.00055516 21136 -276 0.000555436 20243 -277 0.000555618 19426 -278 0.000556778 18635 -279 0.000556976 17870 -280 0.000557162 17190 -281 0.0007904 16506 -282 0.00154557 15837 -283 0.00302973 15234 -284 0.00584543 14717 -285 0.0108796 14225 -286 0.0192919 13810 -287 0.032329 13605 -288 0.0505293 13536 -289 0.0733417 13760 -290 0.0982413 14378 -291 0.121477 15400 -292 0.138636 17017 -293 0.14875 18764 -294 0.150515 20516 -295 0.146372 22389 -296 0.137332 23975 -297 0.124076 25120 -298 0.106469 26137 -299 0.0862987 26973 -300 0.0650552 27584 -301 0.0456456 27741 -302 0.0300744 27565 -303 0.0187879 27212 -304 0.0112085 26432 -305 0.00648306 25501 -306 0.00370346 24466 -307 0.00213399 23472 -308 0.00125463 22415 -309 0.000765794 21427 -310 0.000552587 20533 -311 0.000553175 19632 -312 0.000553525 18831 -313 0.000554941 18119 -314 0.000556327 17336 -315 0.000556008 16721 -316 0.00055593 16086 -317 0.000556421 15516 -318 0.000557308 14918 -319 0.00060681 14402 -320 0.000990746 13849 -321 0.00172359 13355 -322 0.00313688 12902 -323 0.0058708 12425 -324 0.0110637 12087 -325 0.0206777 11743 -326 0.0376394 11531 -327 0.0656182 11582 -328 
0.107414 12034 -329 0.162101 12955 -330 0.223525 14571 -331 0.279935 16842 -332 0.314601 19566 -333 0.313556 22575 -334 0.279571 25279 -335 0.221638 27642 -336 0.158038 29275 -337 0.102505 30638 -338 0.0608328 31209 -339 0.0335531 31260 -340 0.0173332 30520 -341 0.00861545 29604 -342 0.00419454 28370 -343 0.00202587 26940 -344 0.000994029 25614 -345 0.000549339 24445 -346 0.000551477 23239 -347 0.000552891 22300 -348 0.000551775 21280 -349 0.000552425 20424 -350 0.000552135 19571 -351 0.000552542 18753 -352 0.000552863 18058 -353 0.000554438 17348 -354 0.000786735 16671 -355 0.00153958 16047 -356 0.00301482 15500 -357 0.00580589 14883 -358 0.0108227 14347 -359 0.0192357 13947 -360 0.0321613 13672 -361 0.050229 13606 -362 0.0729462 13815 -363 0.0978564 14566 -364 0.120879 15674 -365 0.137663 17049 -366 0.147092 18813 -367 0.150184 20578 -368 0.146971 22245 -369 0.136769 23723 -370 0.12367 24905 -371 0.106187 25871 -372 0.0860921 26687 -373 0.0645899 27375 -374 0.0453473 27635 -375 0.0298122 27551 -376 0.0185448 27134 -377 0.0110517 26468 -378 0.00640294 25661 -379 0.00367011 24653 -380 0.00211832 23556 -381 0.00125246 22513 -382 0.00076891 21568 -383 0.000557384 20672 -384 0.000557295 19811 -385 0.000556837 18982 -386 0.000557433 18179 -387 0.000557376 17457 -388 0.000557751 16720 -389 0.000556844 16112 -390 0.000555603 15479 -391 0.000554871 14809 -392 0.00060335 14275 -393 0.000982808 13757 -394 0.00170757 13221 -395 0.00310351 12758 -396 0.0058181 12286 -397 0.010991 11906 -398 0.0205342 11557 -399 0.0373486 11393 -400 0.0647659 11487 -401 0.105589 11887 -402 0.15967 12798 -403 0.220945 14260 -404 0.277122 16477 -405 0.310108 19295 -406 0.308854 22110 -407 0.274911 24915 -408 0.218618 27273 -409 0.156618 29189 -410 0.101775 30572 -411 0.0607503 31174 -412 0.0334708 31316 -413 0.0173443 30731 -414 0.00865633 29636 -415 0.00421141 28342 -416 0.00204387 26991 -417 0.00100602 25595 -418 0.000555131 24336 -419 0.000555037 23251 -420 0.000555559 22267 -421 0.000554916 
21212 -422 0.000554432 20306 -423 0.000554751 19488 -424 0.00055638 18727 -425 0.000556727 17927 -426 0.000556368 17198 -427 0.000788004 16578 -428 0.00154404 15944 -429 0.00302383 15315 -430 0.00582586 14786 -431 0.0108457 14290 -432 0.0192962 13815 -433 0.0323072 13561 -434 0.0505101 13456 -435 0.0732162 13811 -436 0.0978737 14403 -437 0.121405 15460 -438 0.138202 16993 -439 0.1482 18710 -440 0.149707 20578 -441 0.146945 22256 -442 0.137785 23713 -443 0.123767 25058 -444 0.105989 26087 -445 0.085483 26759 -446 0.0646144 27375 -447 0.0454389 27680 -448 0.0299337 27531 -449 0.018663 27041 -450 0.0111347 26416 -451 0.00644197 25614 -452 0.00369229 24666 -453 0.00211986 23647 -454 0.00124761 22650 -455 0.000769104 21642 -456 0.000558796 20693 -457 0.000559908 19746 -458 0.000559562 18952 -459 0.00056042 18100 -460 0.000559447 17401 -461 0.000557893 16756 -462 0.000557137 16148 -463 0.000557269 15504 -464 0.000557596 14974 -465 0.000606298 14408 -466 0.000987712 13909 -467 0.00171257 13402 -468 0.00311667 12891 -469 0.00584794 12433 -470 0.0110774 11980 -471 0.0207006 11713 -472 0.037673 11583 -473 0.0654988 11677 -474 0.106982 12072 -475 0.161926 12898 -476 0.224327 14548 -477 0.281709 16796 -478 0.314567 19512 -479 0.313419 22428 -480 0.278962 25186 -481 0.221864 27755 -482 0.158559 29556 -483 0.103532 30572 -484 0.0611592 31162 -485 0.0337539 31197 -486 0.0175096 30619 -487 0.00865906 29606 -488 0.00420125 28271 -489 0.00203207 26856 -490 0.00100238 25542 -491 0.000554405 24306 -492 0.00055373 23160 -493 0.0005552 22152 -494 0.000553776 21192 -495 0.000553636 20302 -496 0.000553165 19505 -497 0.000554014 18719 -498 0.00055519 17993 -499 0.000556582 17233 -500 0.000788165 16569 -501 0.00154132 15953 -502 0.00302099 15350 -503 0.00581186 14752 -504 0.0108291 14267 -505 0.0192368 13946 -506 0.0322191 13677 -507 0.0503789 13594 -508 0.0730706 13768 -509 0.0980646 14416 -510 0.121601 15634 -511 0.139046 17110 -512 0.147779 18876 -513 0.149612 20734 -514 0.145796 22414 
-515 0.136936 23884 -516 0.123807 25078 -517 0.106212 26066 -518 0.0855482 26779 -519 0.0643386 27340 -520 0.0452926 27530 -521 0.0298659 27573 -522 0.0185447 27169 -523 0.0110178 26489 -524 0.00635235 25588 -525 0.00362881 24549 -526 0.00209238 23528 -527 0.00123133 22541 -528 0.000755917 21498 -529 0.000546368 20607 -530 0.000547382 19712 -531 0.000547084 18975 -532 0.000546453 18178 -533 0.000546062 17452 -534 0.000546085 16749 -535 0.000546151 16135 -536 0.000545628 15567 -537 0.000545969 14968 -538 0.000594606 14392 -539 0.000968849 13854 -540 0.00168489 13360 -541 0.00306337 12899 -542 0.00573505 12407 -543 0.0108348 12017 -544 0.02025 11713 -545 0.0368201 11517 -546 0.0639795 11556 -547 0.104882 11941 -548 0.158923 12854 -549 0.219796 14396 -550 0.275801 16733 -551 0.307622 19367 -552 0.30785 22230 -553 0.272898 24873 -554 0.217351 27152 -555 0.156138 29108 -556 0.101477 30379 -557 0.0601091 30971 -558 0.0331551 31126 -559 0.017167 30418 -560 0.00853886 29430 -561 0.00415201 28190 -562 0.00201849 26849 -563 0.000991957 25528 -564 0.000546751 24180 -565 0.00054534 23090 -566 0.000544403 22096 -567 0.00054368 21140 -568 0.000543407 20213 -569 0.000544421 19405 -570 0.000545241 18625 -571 0.000546995 17868 -572 0.000547101 17102 -573 0.00077428 16423 -574 0.00151348 15783 -575 0.00296212 15220 -576 0.00569555 14602 -577 0.0106307 14154 -578 0.0188783 13743 -579 0.0316572 13538 -580 0.0495211 13467 -581 0.0718936 13665 -582 0.0961304 14240 -583 0.119127 15341 -584 0.136233 16912 -585 0.145327 18567 -586 0.146983 20301 -587 0.143022 21953 -588 0.134931 23439 -589 0.121892 24750 -590 0.103955 25688 -591 0.0833804 26253 -592 0.0625106 26918 -593 0.0440419 27279 -594 0.0290823 27159 -595 0.0180758 26786 -596 0.0107654 26049 -597 0.00622673 25202 -598 0.00356716 24168 -599 0.00205866 23122 -600 0.00121254 22076 -601 0.000745744 21100 -602 0.000537789 20207 -603 0.000537982 19340 -604 0.000537795 18527 -605 0.000537955 17768 -606 0.000539259 17117 -607 0.00053942 
16425 -608 0.000540477 15701 -609 0.000540424 15134 -610 0.000540084 14558 -611 0.00058571 14069 -612 0.00095364 13498 -613 0.00165505 13054 -614 0.00300205 12616 -615 0.00561724 12142 -616 0.0106079 11720 -617 0.0198178 11410 -618 0.0360368 11231 -619 0.0623418 11314 -620 0.101856 11688 -621 0.15376 12623 -622 0.213046 14078 -623 0.267285 16225 -624 0.299225 18856 -625 0.299517 21756 -626 0.26697 24652 -627 0.2119 27051 -628 0.151393 28925 -629 0.098869 30065 -630 0.0593653 30570 -631 0.0327177 30483 -632 0.0170081 29735 -633 0.0084493 28844 -634 0.00409333 27665 -635 0.00197466 26356 -636 0.000967996 25009 -637 0.000533137 23839 -638 0.000532992 22721 -639 0.000534258 21676 -640 0.000534251 20709 -641 0.000534556 19798 -642 0.000535287 19008 -643 0.000536214 18278 -644 0.000536647 17547 -645 0.000536556 16901 -646 0.000761043 16256 -647 0.00149108 15621 -648 0.00292808 15032 -649 0.0056527 14504 -650 0.0105421 14010 -651 0.0186823 13646 -652 0.0312164 13356 -653 0.0485643 13404 -654 0.0704061 13612 -655 0.0945219 14230 -656 0.117178 15374 -657 0.134568 16843 -658 0.144475 18492 -659 0.146915 20238 -660 0.14393 21958 -661 0.134621 23537 -662 0.121737 24773 -663 0.104744 25772 -664 0.0846226 26427 -665 0.0639754 27040 -666 0.0448457 27279 -667 0.029482 27106 -668 0.0183036 26853 -669 0.0108721 26178 -670 0.00627116 25425 -671 0.0035776 24326 -672 0.00206466 23279 -673 0.00122064 22191 -674 0.000751578 21231 -675 0.000542574 20323 -676 0.000540396 19496 -677 0.000538805 18651 -678 0.00053881 17920 -679 0.000537801 17217 -680 0.000537866 16520 -681 0.000538522 15876 -682 0.000538795 15229 -683 0.000539519 14656 -684 0.000587348 14121 -685 0.000955855 13626 -686 0.00165656 13086 -687 0.00301095 12666 -688 0.00564993 12250 -689 0.0106767 11869 -690 0.0199729 11524 -691 0.03641 11331 -692 0.0632378 11402 -693 0.103483 11788 -694 0.156399 12682 -695 0.215591 14337 -696 0.269462 16547 -697 0.303615 19239 -698 0.304506 22023 -699 0.273068 24769 -700 0.21682 27223 -701 
0.154934 29029 -702 0.100495 30241 -703 0.0597382 30801 -704 0.0329221 30881 -705 0.0170591 30288 -706 0.00845353 29329 -707 0.00408176 28108 -708 0.00198037 26715 -709 0.000977102 25340 -710 0.000541566 24039 -711 0.000542333 22965 -712 0.000542417 21858 -713 0.000541182 20952 -714 0.00054038 20049 -715 0.000539725 19192 -716 0.000539603 18409 -717 0.000539754 17700 -718 0.000539679 16960 -719 0.000763508 16287 -720 0.00149327 15637 -721 0.00292609 15057 -722 0.00563308 14524 -723 0.0104893 14003 -724 0.0185874 13625 -725 0.0310985 13319 -726 0.0487417 13278 -727 0.0707124 13502 -728 0.0947795 14147 -729 0.117155 15183 -730 0.133995 16622 diff -Nru cargo-0.17.0/vendor/flate2-0.2.14/tests/gunzip.rs cargo-0.19.0/vendor/flate2-0.2.14/tests/gunzip.rs --- cargo-0.17.0/vendor/flate2-0.2.14/tests/gunzip.rs 2017-03-24 16:59:54.000000000 +0000 +++ cargo-0.19.0/vendor/flate2-0.2.14/tests/gunzip.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,31 +0,0 @@ -extern crate flate2; - -use std::fs::File; -use std::io::prelude::*; -use std::io; -use std::path::Path; -use flate2::read::GzDecoder; - -// test extraction of a gzipped file -#[test] -fn test_extract_success() { - let content = extract_file(Path::new("tests/good-file.gz")).unwrap(); - let mut expected = Vec::new(); - File::open("tests/good-file.txt").unwrap().read_to_end(&mut expected).unwrap(); - assert!(content == expected); -} - -// test extraction fails on a corrupt file -#[test] -fn test_extract_failure() { - let result = extract_file(Path::new("tests/corrupt-file.gz")); - assert_eq!(result.err().unwrap().kind(), io::ErrorKind::InvalidInput); -} - -// Tries to extract path into memory (assuming a .gz file). 
-fn extract_file(path_compressed: &Path) -> io::Result>{ - let mut v = Vec::new(); - let f = try!(File::open(path_compressed)); - try!(try!(GzDecoder::new(f)).read_to_end(&mut v)); - Ok(v) -} diff -Nru cargo-0.17.0/vendor/flate2-0.2.14/.travis.yml cargo-0.19.0/vendor/flate2-0.2.14/.travis.yml --- cargo-0.17.0/vendor/flate2-0.2.14/.travis.yml 2017-03-24 16:59:54.000000000 +0000 +++ cargo-0.19.0/vendor/flate2-0.2.14/.travis.yml 1970-01-01 00:00:00.000000000 +0000 @@ -1,28 +0,0 @@ -language: rust -rust: - - stable - - beta - - nightly -sudo: false -before_script: - - pip install 'travis-cargo<0.2' --user && export PATH=$HOME/.local/bin:$PATH -script: - - export CARGO_TARGET_DIR=`pwd`/target - - cargo build --verbose - - cargo test --verbose - - cargo test --verbose --features zlib - - cargo test --verbose --features zlib --no-default-features - - rustdoc --test README.md -L target/debug -L target/debug/deps - - cargo doc --no-deps - - cargo doc --no-deps --manifest-path=miniz-sys/Cargo.toml -after_success: - - travis-cargo --only nightly doc-upload -env: - global: - secure: tnY9uOzb+59QyxPwOkj64dYLhytJGEXCo3uMvlbFtBpNJ6B2bN+lFDLaILbki1xkIg6DOFLGGT0+2qLI295V8BgEOqs/bU1WNNTjCdIqhbYI+HrwPP2RocecIXCIrsmL7tVSqfdLnU8RCoS0CBvJOwX/f813UGn3yAP4k5l7K/U= -notifications: - email: - on_success: never -os: - - linux - - osx diff -Nru cargo-0.17.0/vendor/flate2-0.2.17/appveyor.yml cargo-0.19.0/vendor/flate2-0.2.17/appveyor.yml --- cargo-0.17.0/vendor/flate2-0.2.17/appveyor.yml 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/flate2-0.2.17/appveyor.yml 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,17 @@ +environment: + matrix: + - TARGET: x86_64-pc-windows-msvc + - TARGET: i686-pc-windows-msvc + - TARGET: i686-pc-windows-gnu +install: + - ps: Start-FileDownload "https://static.rust-lang.org/dist/rust-nightly-${env:TARGET}.exe" + - rust-nightly-%TARGET%.exe /VERYSILENT /NORESTART /DIR="C:\Program Files (x86)\Rust" + - SET PATH=%PATH%;C:\Program Files (x86)\Rust\bin + - SET 
PATH=%PATH%;C:\MinGW\bin + - rustc -V + - cargo -V + +build: false + +test_script: + - cargo test --verbose --target %TARGET% diff -Nru cargo-0.17.0/vendor/flate2-0.2.17/.cargo-checksum.json cargo-0.19.0/vendor/flate2-0.2.17/.cargo-checksum.json --- cargo-0.17.0/vendor/flate2-0.2.17/.cargo-checksum.json 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/flate2-0.2.17/.cargo-checksum.json 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1 @@ +{"files":{".cargo-ok":"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",".gitignore":"f9b1ca6ae27d1c18215265024629a8960c31379f206d9ed20f64e0b2dcf79805",".travis.yml":"276ab433e244a13d17e98664a2c61573d87a02975ba652bd79b7cad35cee658e","Cargo.toml":"d45e93809a752d0c25cf831575a04f43076d571d295b74c95511a2964ba8cf50","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"378f5840b258e2779c39418f3f2d7b2ba96f1c7917dd6be0713f88305dbda397","README.md":"b875d1d0ca491813f3a2469ea69d37dd667ad0c70f895e6bbda540c99e291154","appveyor.yml":"3a74394c3e3ef3b2c7d9458f526e47587977e98d026b63f00be29479075ff597","src/bufreader.rs":"0a1213858056c8154066f45df7b261c052c6a2c55ec88bc21f56ad3f2748d8c4","src/crc.rs":"99a8dcdf1daf5ec0e2d27479136f94a5d1fddd52c4b4879dcc58060074b9422f","src/deflate.rs":"0e78f7fbdb1d781c668ad7edf963ca69577887ca2b73778e24edd1efa0a57fe1","src/ffi.rs":"8a0e5bfe93618faf84b82427f664e50fdffdc17b40b741cbcd20d3039d853f0d","src/gz.rs":"3f0662fca925b0d31b64cc59b4bad5bb749f573071ca549cb8dacc52fd052f19","src/lib.rs":"5ff0e630b777a08339ad5f90700972ee308886f925697698649b4fc5eb73072b","src/mem.rs":"4dfdea2c16566ad37bf921c08c9a71f46f5232a0b8e201dae01fe4f79b7cd117","src/zio.rs":"e04ac4778eecc0b8cd5d227e347646b4efd4b731667d3dd0a842ef04a768f127","src/zlib.rs":"8454cdbfe36b545e2762bd3c6dab4a4cd47ed1dae59f1866be162d8ffa8441f3","tests/corrupt-file.gz":"083dd284aa1621916a2d0f66ea048c8d3ba7a722b22d0d618722633f51e7d39c","tests/good-file.gz":"87296963e53024a74752179ce7e54087565d358a85d3e65c3b
37ef36eaa3d4a6","tests/good-file.txt":"bc4e03658a441fe2ad2df7cd2197144b87e41696f01e327b380e869cd9b485a0","tests/gunzip.rs":"86de051daafeb306e3958f27a1ae2385b56ec87516951701ad4627d52408fe27"},"package":"d4e4d0c15ef829cbc1b7cda651746be19cceeb238be7b1049227b14891df9e25"} \ No newline at end of file diff -Nru cargo-0.17.0/vendor/flate2-0.2.17/Cargo.toml cargo-0.19.0/vendor/flate2-0.2.17/Cargo.toml --- cargo-0.17.0/vendor/flate2-0.2.17/Cargo.toml 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/flate2-0.2.17/Cargo.toml 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,30 @@ +[package] + +name = "flate2" +authors = ["Alex Crichton "] +version = "0.2.17" +license = "MIT/Apache-2.0" +readme = "README.md" +keywords = ["gzip", "flate", "zlib", "encoding"] +categories = ["compression", "api-bindings"] +repository = "https://github.com/alexcrichton/flate2-rs" +homepage = "https://github.com/alexcrichton/flate2-rs" +documentation = "https://docs.rs/flate2" +description = """ +Bindings to miniz.c for DEFLATE compression and decompression exposed as +Reader/Writer streams. Contains bindings for zlib, deflate, and gzip-based +streams. 
+""" + +[dependencies] +libc = "0.2" +miniz-sys = { path = "miniz-sys", version = "0.1.7", optional = true } +libz-sys = { version = "1.0", optional = true } + +[dev-dependencies] +rand = "0.3" +quickcheck = { version = "0.4", default-features = false } + +[features] +default = ["miniz-sys"] +zlib = ["libz-sys"] diff -Nru cargo-0.17.0/vendor/flate2-0.2.17/.gitignore cargo-0.19.0/vendor/flate2-0.2.17/.gitignore --- cargo-0.17.0/vendor/flate2-0.2.17/.gitignore 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/flate2-0.2.17/.gitignore 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,2 @@ +target +Cargo.lock diff -Nru cargo-0.17.0/vendor/flate2-0.2.17/LICENSE-APACHE cargo-0.19.0/vendor/flate2-0.2.17/LICENSE-APACHE --- cargo-0.17.0/vendor/flate2-0.2.17/LICENSE-APACHE 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/flate2-0.2.17/LICENSE-APACHE 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + +4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + +8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ +Copyright [yyyy] [name of copyright owner] + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff -Nru cargo-0.17.0/vendor/flate2-0.2.17/LICENSE-MIT cargo-0.19.0/vendor/flate2-0.2.17/LICENSE-MIT --- cargo-0.17.0/vendor/flate2-0.2.17/LICENSE-MIT 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/flate2-0.2.17/LICENSE-MIT 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,25 @@ +Copyright (c) 2014 Alex Crichton + +Permission is hereby granted, free of charge, to any +person obtaining a copy of this software and associated +documentation files (the "Software"), to deal in the +Software without restriction, including without +limitation the rights to use, copy, modify, merge, +publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software +is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice +shall be included in all copies or substantial portions +of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF +ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED +TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A +PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR +IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. 
diff -Nru cargo-0.17.0/vendor/flate2-0.2.17/README.md cargo-0.19.0/vendor/flate2-0.2.17/README.md --- cargo-0.17.0/vendor/flate2-0.2.17/README.md 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/flate2-0.2.17/README.md 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,70 @@ +# flate2 + +[![Build Status](https://travis-ci.org/alexcrichton/flate2-rs.svg?branch=master)](https://travis-ci.org/alexcrichton/flate2-rs) +[![Build status](https://ci.appveyor.com/api/projects/status/9tatexq47i3ee13k?svg=true)](https://ci.appveyor.com/project/alexcrichton/flate2-rs) + +[Documentation](https://docs.rs/flate2) + +A streaming compression/decompression library for Rust. The underlying +implementation by default uses [`miniz`](https://code.google.com/p/miniz/) but +can optionally be configured to use the system zlib, if available. + +Supported formats: + +* deflate +* zlib +* gzip + +```toml +# Cargo.toml +[dependencies] +flate2 = "0.2" +``` + +Using zlib instead of miniz: + +```toml +[dependencies] +flate2 = { version = "0.2", features = ["zlib"], default-features = false } +``` + +## Compression + +```rust +extern crate flate2; + +use std::io::prelude::*; +use flate2::Compression; +use flate2::write::ZlibEncoder; + +fn main() { + let mut e = ZlibEncoder::new(Vec::new(), Compression::Default); + e.write(b"foo"); + e.write(b"bar"); + let compressed_bytes = e.finish(); +} +``` + +## Decompression + +```rust,no_run +extern crate flate2; + +use std::io::prelude::*; +use flate2::read::GzDecoder; + +fn main() { + let mut d = GzDecoder::new("...".as_bytes()).unwrap(); + let mut s = String::new(); + d.read_to_string(&mut s).unwrap(); + println!("{}", s); +} +``` + +# License + +`flate2-rs` is primarily distributed under the terms of both the MIT license and +the Apache License (Version 2.0), with portions covered by various BSD-like +licenses. + +See LICENSE-APACHE, and LICENSE-MIT for details. 
diff -Nru cargo-0.17.0/vendor/flate2-0.2.17/src/bufreader.rs cargo-0.19.0/vendor/flate2-0.2.17/src/bufreader.rs --- cargo-0.17.0/vendor/flate2-0.2.17/src/bufreader.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/flate2-0.2.17/src/bufreader.rs 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,87 @@ +// Copyright 2013 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use std::cmp; +use std::io; +use std::io::prelude::*; +use std::mem; + +pub struct BufReader { + inner: R, + buf: Box<[u8]>, + pos: usize, + cap: usize, +} + +impl BufReader { + pub fn new(inner: R) -> BufReader { + BufReader::with_buf(vec![0; 32 * 1024], inner) + } + + pub fn with_buf(buf: Vec, inner: R) -> BufReader { + BufReader { + inner: inner, + buf: buf.into_boxed_slice(), + pos: 0, + cap: 0, + } + } + + pub fn get_ref(&self) -> &R { + &self.inner + } + + pub fn get_mut(&mut self) -> &mut R { + &mut self.inner + } + + pub fn into_inner(self) -> R { + self.inner + } + + pub fn reset(&mut self, inner: R) -> R { + self.pos = 0; + self.cap = 0; + mem::replace(&mut self.inner, inner) + } +} + +impl Read for BufReader { + fn read(&mut self, buf: &mut [u8]) -> io::Result { + // If we don't have any buffered data and we're doing a massive read + // (larger than our internal buffer), bypass our internal buffer + // entirely. 
+ if self.pos == self.cap && buf.len() >= self.buf.len() { + return self.inner.read(buf); + } + let nread = { + let mut rem = try!(self.fill_buf()); + try!(rem.read(buf)) + }; + self.consume(nread); + Ok(nread) + } +} + +impl BufRead for BufReader { + fn fill_buf(&mut self) -> io::Result<&[u8]> { + // If we've reached the end of our internal buffer then we need to fetch + // some more data from the underlying reader. + if self.pos == self.cap { + self.cap = try!(self.inner.read(&mut self.buf)); + self.pos = 0; + } + Ok(&self.buf[self.pos..self.cap]) + } + + fn consume(&mut self, amt: usize) { + self.pos = cmp::min(self.pos + amt, self.cap); + } +} diff -Nru cargo-0.17.0/vendor/flate2-0.2.17/src/crc.rs cargo-0.19.0/vendor/flate2-0.2.17/src/crc.rs --- cargo-0.17.0/vendor/flate2-0.2.17/src/crc.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/flate2-0.2.17/src/crc.rs 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,79 @@ +//! Simple CRC bindings backed by miniz.c + +use std::io::prelude::*; +use std::io; +use libc; + +use ffi; + +pub struct Crc { + crc: libc::c_ulong, + amt: u32, +} + +pub struct CrcReader { + inner: R, + crc: Crc, +} + +impl Crc { + pub fn new() -> Crc { + Crc { crc: 0, amt: 0 } + } + + pub fn sum(&self) -> u32 { + self.crc as u32 + } + + pub fn amt_as_u32(&self) -> u32 { + self.amt + } + + pub fn update(&mut self, data: &[u8]) { + self.amt = self.amt.wrapping_add(data.len() as u32); + self.crc = unsafe { + ffi::mz_crc32(self.crc, data.as_ptr(), data.len() as libc::size_t) + }; + } +} + +impl CrcReader { + pub fn new(r: R) -> CrcReader { + CrcReader { + inner: r, + crc: Crc::new(), + } + } + + pub fn crc(&self) -> &Crc { + &self.crc + } + + pub fn into_inner(self) -> R { + self.inner + } + + pub fn inner(&mut self) -> &mut R { + &mut self.inner + } +} + +impl Read for CrcReader { + fn read(&mut self, into: &mut [u8]) -> io::Result { + let amt = try!(self.inner.read(into)); + self.crc.update(&into[..amt]); + Ok(amt) + } +} + +impl BufRead 
for CrcReader { + fn fill_buf(&mut self) -> io::Result<&[u8]> { + self.inner.fill_buf() + } + fn consume(&mut self, amt: usize) { + if let Ok(data) = self.inner.fill_buf() { + self.crc.update(&data[..amt]); + } + self.inner.consume(amt); + } +} diff -Nru cargo-0.17.0/vendor/flate2-0.2.17/src/deflate.rs cargo-0.19.0/vendor/flate2-0.2.17/src/deflate.rs --- cargo-0.17.0/vendor/flate2-0.2.17/src/deflate.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/flate2-0.2.17/src/deflate.rs 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,584 @@ +//! DEFLATE compression and decompression of streams + +use std::io::prelude::*; +use std::io; +use std::mem; + +use bufreader::BufReader; +use zio; +use {Compress, Decompress}; + +/// A DEFLATE encoder, or compressor. +/// +/// This structure implements a `Write` interface and takes a stream of +/// uncompressed data, writing the compressed data to the wrapped writer. +pub struct EncoderWriter { + inner: zio::Writer, +} + +/// A DEFLATE encoder, or compressor. +/// +/// This structure implements a `Read` interface and will read uncompressed +/// data from an underlying stream and emit a stream of compressed data. +pub struct EncoderReader { + inner: EncoderReaderBuf>, +} + +/// A DEFLATE encoder, or compressor. +/// +/// This structure implements a `BufRead` interface and will read uncompressed +/// data from an underlying stream and emit a stream of compressed data. +pub struct EncoderReaderBuf { + obj: R, + data: Compress, +} + +/// A DEFLATE decoder, or decompressor. +/// +/// This structure implements a `Read` interface and takes a stream of +/// compressed data as input, providing the decompressed data when read from. +pub struct DecoderReader { + inner: DecoderReaderBuf>, +} + +/// A DEFLATE decoder, or decompressor. +/// +/// This structure implements a `BufRead` interface and takes a stream of +/// compressed data as input, providing the decompressed data when read from. 
+pub struct DecoderReaderBuf { + obj: R, + data: Decompress, +} + +/// A DEFLATE decoder, or decompressor. +/// +/// This structure implements a `Write` and will emit a stream of decompressed +/// data when fed a stream of compressed data. +pub struct DecoderWriter { + inner: zio::Writer, +} + +impl EncoderWriter { + /// Creates a new encoder which will write compressed data to the stream + /// given at the given compression level. + /// + /// When this encoder is dropped or unwrapped the final pieces of data will + /// be flushed. + pub fn new(w: W, level: ::Compression) -> EncoderWriter { + EncoderWriter { + inner: zio::Writer::new(w, Compress::new(level, false)), + } + } + + /// Resets the state of this encoder entirely, swapping out the output + /// stream for another. + /// + /// This function will finish encoding the current stream into the current + /// output stream before swapping out the two output streams. If the stream + /// cannot be finished an error is returned. + /// + /// After the current stream has been finished, this will reset the internal + /// state of this encoder and replace the output stream with the one + /// provided, returning the previous output stream. Future data written to + /// this encoder will be the compressed into the stream `w` provided. + pub fn reset(&mut self, w: W) -> io::Result { + try!(self.inner.finish()); + self.inner.data.reset(); + Ok(self.inner.replace(w)) + } + + /// Consumes this encoder, flushing the output stream. + /// + /// This will flush the underlying data stream and then return the contained + /// writer if the flush succeeded. 
+ pub fn finish(mut self) -> io::Result { + try!(self.inner.finish()); + Ok(self.inner.into_inner()) + } +} + +impl Write for EncoderWriter { + fn write(&mut self, buf: &[u8]) -> io::Result { + self.inner.write(buf) + } + + fn flush(&mut self) -> io::Result<()> { + self.inner.flush() + } +} + +impl EncoderReader { + /// Creates a new encoder which will read uncompressed data from the given + /// stream and emit the compressed stream. + pub fn new(r: R, level: ::Compression) -> EncoderReader { + EncoderReader { + inner: EncoderReaderBuf::new(BufReader::new(r), level), + } + } + + /// Resets the state of this encoder entirely, swapping out the input + /// stream for another. + /// + /// This function will reset the internal state of this encoder and replace + /// the input stream with the one provided, returning the previous input + /// stream. Future data read from this encoder will be the compressed + /// version of `r`'s data. + pub fn reset(&mut self, r: R) -> R { + self.inner.data.reset(); + self.inner.obj.reset(r) + } + + /// Acquires a reference to the underlying reader + pub fn get_ref(&self) -> &R { + self.inner.get_ref().get_ref() + } + + /// Acquires a mutable reference to the underlying stream + /// + /// Note that mutation of the stream may result in surprising results if + /// this encoder is continued to be used. + pub fn get_mut(&mut self) -> &mut R { + self.inner.get_mut().get_mut() + } + + /// Consumes this encoder, returning the underlying reader. + pub fn into_inner(self) -> R { + self.inner.into_inner().into_inner() + } +} + +impl Read for EncoderReader { + fn read(&mut self, buf: &mut [u8]) -> io::Result { + self.inner.read(buf) + } +} + +impl EncoderReaderBuf { + /// Creates a new encoder which will read uncompressed data from the given + /// stream and emit the compressed stream. 
+ pub fn new(r: R, level: ::Compression) -> EncoderReaderBuf { + EncoderReaderBuf { + obj: r, + data: Compress::new(level, false), + } + } + + /// Resets the state of this encoder entirely, swapping out the input + /// stream for another. + /// + /// This function will reset the internal state of this encoder and replace + /// the input stream with the one provided, returning the previous input + /// stream. Future data read from this encoder will be the compressed + /// version of `r`'s data. + pub fn reset(&mut self, r: R) -> R { + self.data.reset(); + mem::replace(&mut self.obj, r) + } + + /// Acquires a reference to the underlying reader + pub fn get_ref(&self) -> &R { + &self.obj + } + + /// Acquires a mutable reference to the underlying stream + /// + /// Note that mutation of the stream may result in surprising results if + /// this encoder is continued to be used. + pub fn get_mut(&mut self) -> &mut R { + &mut self.obj + } + + /// Consumes this encoder, returning the underlying reader. + pub fn into_inner(self) -> R { + self.obj + } +} + +impl Read for EncoderReaderBuf { + fn read(&mut self, buf: &mut [u8]) -> io::Result { + zio::read(&mut self.obj, &mut self.data, buf) + } +} + +impl DecoderReader { + /// Creates a new decoder which will decompress data read from the given + /// stream. + pub fn new(r: R) -> DecoderReader { + DecoderReader::new_with_buf(r, vec![0; 32 * 1024]) + } + + /// Same as `new`, but the intermediate buffer for data is specified. + /// + /// Note that the capacity of the intermediate buffer is never increased, + /// and it is recommended for it to be large. + pub fn new_with_buf(r: R, buf: Vec) -> DecoderReader { + DecoderReader { + inner: DecoderReaderBuf::new(BufReader::with_buf(buf, r)) + } + } + + /// Resets the state of this decoder entirely, swapping out the input + /// stream for another. 
+ /// + /// This will reset the internal state of this decoder and replace the + /// input stream with the one provided, returning the previous input + /// stream. Future data read from this decoder will be the decompressed + /// version of `r`'s data. + pub fn reset(&mut self, r: R) -> R { + self.inner.data = Decompress::new(false); + self.inner.obj.reset(r) + } + + /// Acquires a reference to the underlying stream + pub fn get_ref(&self) -> &R { + self.inner.get_ref().get_ref() + } + + /// Acquires a mutable reference to the underlying stream + /// + /// Note that mutation of the stream may result in surprising results if + /// this encoder is continued to be used. + pub fn get_mut(&mut self) -> &mut R { + self.inner.get_mut().get_mut() + } + + /// Consumes this decoder, returning the underlying reader. + pub fn into_inner(self) -> R { + self.inner.into_inner().into_inner() + } + + /// Returns the number of bytes that the decompressor has consumed. + /// + /// Note that this will likely be smaller than what the decompressor + /// actually read from the underlying stream due to buffering. + pub fn total_in(&self) -> u64 { + self.inner.total_in() + } + + /// Returns the number of bytes that the decompressor has produced. + pub fn total_out(&self) -> u64 { + self.inner.total_out() + } +} + +impl Read for DecoderReader { + fn read(&mut self, into: &mut [u8]) -> io::Result { + self.inner.read(into) + } +} + +impl DecoderReaderBuf { + /// Creates a new decoder which will decompress data read from the given + /// stream. + pub fn new(r: R) -> DecoderReaderBuf { + DecoderReaderBuf { + obj: r, + data: Decompress::new(false), + } + } + + /// Resets the state of this decoder entirely, swapping out the input + /// stream for another. + /// + /// This will reset the internal state of this decoder and replace the + /// input stream with the one provided, returning the previous input + /// stream. 
Future data read from this decoder will be the decompressed + /// version of `r`'s data. + pub fn reset(&mut self, r: R) -> R { + self.data = Decompress::new(false); + mem::replace(&mut self.obj, r) + } + + /// Acquires a reference to the underlying stream + pub fn get_ref(&self) -> &R { + &self.obj + } + + /// Acquires a mutable reference to the underlying stream + /// + /// Note that mutation of the stream may result in surprising results if + /// this encoder is continued to be used. + pub fn get_mut(&mut self) -> &mut R { + &mut self.obj + } + + /// Consumes this decoder, returning the underlying reader. + pub fn into_inner(self) -> R { + self.obj + } + + /// Returns the number of bytes that the decompressor has consumed. + /// + /// Note that this will likely be smaller than what the decompressor + /// actually read from the underlying stream due to buffering. + pub fn total_in(&self) -> u64 { + self.data.total_in() + } + + /// Returns the number of bytes that the decompressor has produced. + pub fn total_out(&self) -> u64 { + self.data.total_out() + } +} + +impl Read for DecoderReaderBuf { + fn read(&mut self, into: &mut [u8]) -> io::Result { + zio::read(&mut self.obj, &mut self.data, into) + } +} + +impl DecoderWriter { + /// Creates a new decoder which will write uncompressed data to the stream. + /// + /// When this encoder is dropped or unwrapped the final pieces of data will + /// be flushed. + pub fn new(w: W) -> DecoderWriter { + DecoderWriter { + inner: zio::Writer::new(w, Decompress::new(false)), + } + } + + /// Resets the state of this decoder entirely, swapping out the output + /// stream for another. + /// + /// This function will finish encoding the current stream into the current + /// output stream before swapping out the two output streams. If the stream + /// cannot be finished an error is returned. 
+ /// + /// This will then reset the internal state of this decoder and replace the + /// output stream with the one provided, returning the previous output + /// stream. Future data written to this decoder will be decompressed into + /// the output stream `w`. + pub fn reset(&mut self, w: W) -> io::Result { + try!(self.inner.finish()); + self.inner.data = Decompress::new(false); + Ok(self.inner.replace(w)) + } + + /// Consumes this encoder, flushing the output stream. + /// + /// This will flush the underlying data stream and then return the contained + /// writer if the flush succeeded. + pub fn finish(mut self) -> io::Result { + try!(self.inner.finish()); + Ok(self.inner.into_inner()) + } + + /// Returns the number of bytes that the decompressor has consumed for + /// decompression. + /// + /// Note that this will likely be smaller than the number of bytes + /// successfully written to this stream due to internal buffering. + pub fn total_in(&self) -> u64 { + self.inner.data.total_in() + } + + /// Returns the number of bytes that the decompressor has written to its + /// output stream. 
+ pub fn total_out(&self) -> u64 { + self.inner.data.total_out() + } +} + +impl Write for DecoderWriter { + fn write(&mut self, buf: &[u8]) -> io::Result { + self.inner.write(buf) + } + + fn flush(&mut self) -> io::Result<()> { + self.inner.flush() + } +} + +#[cfg(test)] +mod tests { + use std::io::prelude::*; + + use rand::{thread_rng, Rng}; + + use deflate::{EncoderWriter, EncoderReader, DecoderReader, DecoderWriter}; + use Compression::Default; + + #[test] + fn roundtrip() { + let mut real = Vec::new(); + let mut w = EncoderWriter::new(Vec::new(), Default); + let v = thread_rng().gen_iter::().take(1024).collect::>(); + for _ in 0..200 { + let to_write = &v[..thread_rng().gen_range(0, v.len())]; + real.extend(to_write.iter().map(|x| *x)); + w.write_all(to_write).unwrap(); + } + let result = w.finish().unwrap(); + let mut r = DecoderReader::new(&result[..]); + let mut ret = Vec::new(); + r.read_to_end(&mut ret).unwrap(); + assert!(ret == real); + } + + #[test] + fn drop_writes() { + let mut data = Vec::new(); + EncoderWriter::new(&mut data, Default).write_all(b"foo").unwrap(); + let mut r = DecoderReader::new(&data[..]); + let mut ret = Vec::new(); + r.read_to_end(&mut ret).unwrap(); + assert!(ret == b"foo"); + } + + #[test] + fn total_in() { + let mut real = Vec::new(); + let mut w = EncoderWriter::new(Vec::new(), Default); + let v = thread_rng().gen_iter::().take(1024).collect::>(); + for _ in 0..200 { + let to_write = &v[..thread_rng().gen_range(0, v.len())]; + real.extend(to_write.iter().map(|x| *x)); + w.write_all(to_write).unwrap(); + } + let mut result = w.finish().unwrap(); + + let result_len = result.len(); + + for _ in 0..200 { + result.extend(v.iter().map(|x| *x)); + } + + let mut r = DecoderReader::new(&result[..]); + let mut ret = Vec::new(); + r.read_to_end(&mut ret).unwrap(); + assert!(ret == real); + assert_eq!(r.total_in(), result_len as u64); + } + + #[test] + fn roundtrip2() { + let v = thread_rng() + .gen_iter::() + .take(1024 * 1024) + 
.collect::>(); + let mut r = DecoderReader::new(EncoderReader::new(&v[..], Default)); + let mut ret = Vec::new(); + r.read_to_end(&mut ret).unwrap(); + assert_eq!(ret, v); + } + + #[test] + fn roundtrip3() { + let v = thread_rng() + .gen_iter::() + .take(1024 * 1024) + .collect::>(); + let mut w = EncoderWriter::new(DecoderWriter::new(Vec::new()), Default); + w.write_all(&v).unwrap(); + let w = w.finish().unwrap().finish().unwrap(); + assert!(w == v); + } + + #[test] + fn reset_writer() { + let v = thread_rng() + .gen_iter::() + .take(1024 * 1024) + .collect::>(); + let mut w = EncoderWriter::new(Vec::new(), Default); + w.write_all(&v).unwrap(); + let a = w.reset(Vec::new()).unwrap(); + w.write_all(&v).unwrap(); + let b = w.finish().unwrap(); + + let mut w = EncoderWriter::new(Vec::new(), Default); + w.write_all(&v).unwrap(); + let c = w.finish().unwrap(); + assert!(a == b && b == c); + } + + #[test] + fn reset_reader() { + let v = thread_rng() + .gen_iter::() + .take(1024 * 1024) + .collect::>(); + let (mut a, mut b, mut c) = (Vec::new(), Vec::new(), Vec::new()); + let mut r = EncoderReader::new(&v[..], Default); + r.read_to_end(&mut a).unwrap(); + r.reset(&v[..]); + r.read_to_end(&mut b).unwrap(); + + let mut r = EncoderReader::new(&v[..], Default); + r.read_to_end(&mut c).unwrap(); + assert!(a == b && b == c); + } + + #[test] + fn reset_decoder() { + let v = thread_rng() + .gen_iter::() + .take(1024 * 1024) + .collect::>(); + let mut w = EncoderWriter::new(Vec::new(), Default); + w.write_all(&v).unwrap(); + let data = w.finish().unwrap(); + + { + let (mut a, mut b, mut c) = (Vec::new(), Vec::new(), Vec::new()); + let mut r = DecoderReader::new(&data[..]); + r.read_to_end(&mut a).unwrap(); + r.reset(&data); + r.read_to_end(&mut b).unwrap(); + + let mut r = DecoderReader::new(&data[..]); + r.read_to_end(&mut c).unwrap(); + assert!(a == b && b == c && c == v); + } + + { + let mut w = DecoderWriter::new(Vec::new()); + w.write_all(&data).unwrap(); + let a = 
w.reset(Vec::new()).unwrap(); + w.write_all(&data).unwrap(); + let b = w.finish().unwrap(); + + let mut w = DecoderWriter::new(Vec::new()); + w.write_all(&data).unwrap(); + let c = w.finish().unwrap(); + assert!(a == b && b == c && c == v); + } + } + + #[test] + fn zero_length_read_with_data() { + let m = vec![3u8; 128 * 1024 + 1]; + let mut c = EncoderReader::new(&m[..], ::Compression::Default); + + let mut result = Vec::new(); + c.read_to_end(&mut result).unwrap(); + + let mut d = DecoderReader::new(&result[..]); + let mut data = Vec::new(); + assert!(d.read(&mut data).unwrap() == 0); + } + + #[test] + fn qc_reader() { + ::quickcheck::quickcheck(test as fn(_) -> _); + + fn test(v: Vec) -> bool { + let mut r = DecoderReader::new(EncoderReader::new(&v[..], Default)); + let mut v2 = Vec::new(); + r.read_to_end(&mut v2).unwrap(); + v == v2 + } + } + + #[test] + fn qc_writer() { + ::quickcheck::quickcheck(test as fn(_) -> _); + + fn test(v: Vec) -> bool { + let mut w = EncoderWriter::new(DecoderWriter::new(Vec::new()), Default); + w.write_all(&v).unwrap(); + v == w.finish().unwrap().finish().unwrap() + } + } +} diff -Nru cargo-0.17.0/vendor/flate2-0.2.17/src/ffi.rs cargo-0.19.0/vendor/flate2-0.2.17/src/ffi.rs --- cargo-0.17.0/vendor/flate2-0.2.17/src/ffi.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/flate2-0.2.17/src/ffi.rs 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,66 @@ +pub use self::imp::*; + +#[cfg(feature = "zlib")] +#[allow(bad_style)] +mod imp { + extern crate libz_sys as z; + use std::mem; + use libc::{c_int, size_t, c_ulong, c_uint, c_char}; + + pub use self::z::deflateEnd as mz_deflateEnd; + pub use self::z::inflateEnd as mz_inflateEnd; + pub use self::z::deflateReset as mz_deflateReset; + pub use self::z::deflate as mz_deflate; + pub use self::z::inflate as mz_inflate; + pub use self::z::z_stream as mz_stream; + + pub use self::z::Z_BLOCK as MZ_BLOCK; + pub use self::z::Z_BUF_ERROR as MZ_BUF_ERROR; + pub use self::z::Z_DATA_ERROR as 
MZ_DATA_ERROR; + pub use self::z::Z_DEFAULT_STRATEGY as MZ_DEFAULT_STRATEGY; + pub use self::z::Z_DEFLATED as MZ_DEFLATED; + pub use self::z::Z_FINISH as MZ_FINISH; + pub use self::z::Z_FULL_FLUSH as MZ_FULL_FLUSH; + pub use self::z::Z_NO_FLUSH as MZ_NO_FLUSH; + pub use self::z::Z_OK as MZ_OK; + pub use self::z::Z_PARTIAL_FLUSH as MZ_PARTIAL_FLUSH; + pub use self::z::Z_STREAM_END as MZ_STREAM_END; + pub use self::z::Z_SYNC_FLUSH as MZ_SYNC_FLUSH; + pub use self::z::Z_STREAM_ERROR as MZ_STREAM_ERROR; + + pub const MZ_DEFAULT_WINDOW_BITS: c_int = 15; + + pub unsafe extern fn mz_crc32(crc: c_ulong, + ptr: *const u8, + len: size_t) -> c_ulong { + z::crc32(crc, ptr, len as c_uint) + } + + const ZLIB_VERSION: &'static str = "1.2.8\0"; + + pub unsafe extern fn mz_deflateInit2(stream: *mut mz_stream, + level: c_int, + method: c_int, + window_bits: c_int, + mem_level: c_int, + strategy: c_int) -> c_int { + z::deflateInit2_(stream, level, method, window_bits, mem_level, + strategy, + ZLIB_VERSION.as_ptr() as *const c_char, + mem::size_of::() as c_int) + } + pub unsafe extern fn mz_inflateInit2(stream: *mut mz_stream, + window_bits: c_int) + -> c_int { + z::inflateInit2_(stream, window_bits, + ZLIB_VERSION.as_ptr() as *const c_char, + mem::size_of::() as c_int) + } +} + +#[cfg(not(feature = "zlib"))] +mod imp { + extern crate miniz_sys; + + pub use self::miniz_sys::*; +} diff -Nru cargo-0.17.0/vendor/flate2-0.2.17/src/gz.rs cargo-0.19.0/vendor/flate2-0.2.17/src/gz.rs --- cargo-0.17.0/vendor/flate2-0.2.17/src/gz.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/flate2-0.2.17/src/gz.rs 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,674 @@ +//! gzip compression/decompression +//! +//! 
[1]: http://www.gzip.org/zlib/rfc-gzip.html + +use std::cmp; +use std::env; +use std::ffi::CString; +use std::io::prelude::*; +use std::io; + +use {Compression, Compress}; +use bufreader::BufReader; +use crc::{CrcReader, Crc}; +use deflate; +use zio; + +static FHCRC: u8 = 1 << 1; +static FEXTRA: u8 = 1 << 2; +static FNAME: u8 = 1 << 3; +static FCOMMENT: u8 = 1 << 4; + +/// A gzip streaming encoder +/// +/// This structure exposes a `Write` interface that will emit compressed data +/// to the underlying writer `W`. +pub struct EncoderWriter { + inner: zio::Writer, + crc: Crc, + header: Vec, +} + +/// A gzip streaming encoder +/// +/// This structure exposes a `Read` interface that will read uncompressed data +/// from the underlying reader and expose the compressed version as a `Read` +/// interface. +pub struct EncoderReader { + inner: EncoderReaderBuf>, +} + +/// A gzip streaming encoder +/// +/// This structure exposes a `Read` interface that will read uncompressed data +/// from the underlying reader and expose the compressed version as a `Read` +/// interface. +pub struct EncoderReaderBuf { + inner: deflate::EncoderReaderBuf>, + header: Vec, + pos: usize, + eof: bool, +} + +/// A builder structure to create a new gzip Encoder. +/// +/// This structure controls header configuration options such as the filename. +pub struct Builder { + extra: Option>, + filename: Option, + comment: Option, + mtime: u32, +} + +/// A gzip streaming decoder +/// +/// This structure exposes a `Read` interface that will consume compressed +/// data from the underlying reader and emit uncompressed data. +pub struct DecoderReader { + inner: DecoderReaderBuf>, +} + +/// A gzip streaming decoder +/// +/// This structure exposes a `Read` interface that will consume compressed +/// data from the underlying reader and emit uncompressed data. +pub struct DecoderReaderBuf { + inner: CrcReader>, + header: Header, + finished: bool, +} + +/// A structure representing the header of a gzip stream. 
+/// +/// The header can contain metadata about the file that was compressed, if +/// present. +pub struct Header { + extra: Option>, + filename: Option>, + comment: Option>, + mtime: u32, +} + +impl Builder { + /// Create a new blank builder with no header by default. + pub fn new() -> Builder { + Builder { + extra: None, + filename: None, + comment: None, + mtime: 0, + } + } + + /// Configure the `mtime` field in the gzip header. + pub fn mtime(mut self, mtime: u32) -> Builder { + self.mtime = mtime; + self + } + + /// Configure the `extra` field in the gzip header. + pub fn extra(mut self, extra: Vec) -> Builder { + self.extra = Some(extra); + self + } + + /// Configure the `filename` field in the gzip header. + pub fn filename(mut self, filename: &[u8]) -> Builder { + self.filename = Some(CString::new(filename).unwrap()); + self + } + + /// Configure the `comment` field in the gzip header. + pub fn comment(mut self, comment: &[u8]) -> Builder { + self.comment = Some(CString::new(comment).unwrap()); + self + } + + /// Consume this builder, creating a writer encoder in the process. + /// + /// The data written to the returned encoder will be compressed and then + /// written out to the supplied parameter `w`. + pub fn write(self, w: W, lvl: Compression) -> EncoderWriter { + EncoderWriter { + inner: zio::Writer::new(w, Compress::new(lvl, false)), + crc: Crc::new(), + header: self.into_header(lvl), + } + } + + /// Consume this builder, creating a reader encoder in the process. + /// + /// Data read from the returned encoder will be the compressed version of + /// the data read from the given reader. + pub fn read(self, r: R, lvl: Compression) -> EncoderReader { + EncoderReader { + inner: self.buf_read(BufReader::new(r), lvl), + } + } + + /// Consume this builder, creating a reader encoder in the process. + /// + /// Data read from the returned encoder will be the compressed version of + /// the data read from the given reader. 
+ pub fn buf_read(self, r: R, lvl: Compression) -> EncoderReaderBuf + where R: BufRead + { + let crc = CrcReader::new(r); + EncoderReaderBuf { + inner: deflate::EncoderReaderBuf::new(crc, lvl), + header: self.into_header(lvl), + pos: 0, + eof: false, + } + } + + fn into_header(self, lvl: Compression) -> Vec { + let Builder { extra, filename, comment, mtime } = self; + let mut flg = 0; + let mut header = vec![0u8; 10]; + match extra { + Some(v) => { + flg |= FEXTRA; + header.push((v.len() >> 0) as u8); + header.push((v.len() >> 8) as u8); + header.extend(v); + } + None => {} + } + match filename { + Some(filename) => { + flg |= FNAME; + header.extend(filename.as_bytes_with_nul().iter().map(|x| *x)); + } + None => {} + } + match comment { + Some(comment) => { + flg |= FCOMMENT; + header.extend(comment.as_bytes_with_nul().iter().map(|x| *x)); + } + None => {} + } + header[0] = 0x1f; + header[1] = 0x8b; + header[2] = 8; + header[3] = flg; + header[4] = (mtime >> 0) as u8; + header[5] = (mtime >> 8) as u8; + header[6] = (mtime >> 16) as u8; + header[7] = (mtime >> 24) as u8; + header[8] = match lvl { + Compression::Best => 2, + Compression::Fast => 4, + _ => 0, + }; + header[9] = match env::consts::OS { + "linux" => 3, + "macos" => 7, + "win32" => 0, + _ => 255, + }; + return header; + } +} + +impl EncoderWriter { + /// Creates a new encoder which will use the given compression level. + /// + /// The encoder is not configured specially for the emitted header. For + /// header configuration, see the `Builder` type. + /// + /// The data written to the returned encoder will be compressed and then + /// written to the stream `w`. + pub fn new(w: W, level: Compression) -> EncoderWriter { + Builder::new().write(w, level) + } + + /// Finish encoding this stream, returning the underlying writer once the + /// encoding is done. 
+ pub fn finish(mut self) -> io::Result { + try!(self.do_finish()); + Ok(self.inner.take_inner().unwrap()) + } + + fn do_finish(&mut self) -> io::Result<()> { + if self.header.len() != 0 { + try!(self.inner.get_mut().unwrap().write_all(&self.header)); + } + try!(self.inner.finish()); + let mut inner = self.inner.get_mut().unwrap(); + let (sum, amt) = (self.crc.sum() as u32, self.crc.amt_as_u32()); + let buf = [(sum >> 0) as u8, + (sum >> 8) as u8, + (sum >> 16) as u8, + (sum >> 24) as u8, + (amt >> 0) as u8, + (amt >> 8) as u8, + (amt >> 16) as u8, + (amt >> 24) as u8]; + inner.write_all(&buf) + } +} + +impl Write for EncoderWriter { + fn write(&mut self, buf: &[u8]) -> io::Result { + if self.header.len() != 0 { + try!(self.inner.get_mut().unwrap().write_all(&self.header)); + self.header.truncate(0); + } + let n = try!(self.inner.write(buf)); + self.crc.update(&buf[..n]); + Ok(n) + } + + fn flush(&mut self) -> io::Result<()> { + self.inner.flush() + } +} + +impl Drop for EncoderWriter { + fn drop(&mut self) { + if self.inner.get_mut().is_some() { + let _ = self.do_finish(); + } + } +} + +impl EncoderReader { + /// Creates a new encoder which will use the given compression level. + /// + /// The encoder is not configured specially for the emitted header. For + /// header configuration, see the `Builder` type. + /// + /// The data read from the stream `r` will be compressed and available + /// through the returned reader. 
+ pub fn new(r: R, level: Compression) -> EncoderReader { + Builder::new().read(r, level) + } + + /// Returns the underlying stream, consuming this encoder + pub fn into_inner(self) -> R { + self.inner.into_inner().into_inner() + } +} + +fn copy(into: &mut [u8], from: &[u8], pos: &mut usize) -> usize { + let min = cmp::min(into.len(), from.len() - *pos); + for (slot, val) in into.iter_mut().zip(from[*pos..*pos + min].iter()) { + *slot = *val; + } + *pos += min; + return min; +} + +impl Read for EncoderReader { + fn read(&mut self, mut into: &mut [u8]) -> io::Result { + self.inner.read(into) + } +} + +impl EncoderReaderBuf { + /// Creates a new encoder which will use the given compression level. + /// + /// The encoder is not configured specially for the emitted header. For + /// header configuration, see the `Builder` type. + /// + /// The data read from the stream `r` will be compressed and available + /// through the returned reader. + pub fn new(r: R, level: Compression) -> EncoderReaderBuf { + Builder::new().buf_read(r, level) + } + + /// Returns the underlying stream, consuming this encoder + pub fn into_inner(self) -> R { + self.inner.into_inner().into_inner() + } + + fn read_footer(&mut self, into: &mut [u8]) -> io::Result { + if self.pos == 8 { + return Ok(0); + } + let crc = self.inner.get_ref().crc(); + let ref arr = [(crc.sum() >> 0) as u8, + (crc.sum() >> 8) as u8, + (crc.sum() >> 16) as u8, + (crc.sum() >> 24) as u8, + (crc.amt_as_u32() >> 0) as u8, + (crc.amt_as_u32() >> 8) as u8, + (crc.amt_as_u32() >> 16) as u8, + (crc.amt_as_u32() >> 24) as u8]; + Ok(copy(into, arr, &mut self.pos)) + } +} + +impl Read for EncoderReaderBuf { + fn read(&mut self, mut into: &mut [u8]) -> io::Result { + let mut amt = 0; + if self.eof { + return self.read_footer(into); + } else if self.pos < self.header.len() { + amt += copy(into, &self.header, &mut self.pos); + if amt == into.len() { + return Ok(amt); + } + let tmp = into; + into = &mut tmp[amt..]; + } + match 
try!(self.inner.read(into)) { + 0 => { + self.eof = true; + self.pos = 0; + self.read_footer(into) + } + n => Ok(amt + n), + } + } +} + +impl DecoderReader { + /// Creates a new decoder from the given reader, immediately parsing the + /// gzip header. + /// + /// If an error is encountered when parsing the gzip header, an error is + /// returned. + pub fn new(r: R) -> io::Result> { + DecoderReaderBuf::new(BufReader::new(r)).map(|r| { + DecoderReader { inner: r } + }) + } + + /// Returns the header associated with this stream. + pub fn header(&self) -> &Header { + self.inner.header() + } +} + +impl Read for DecoderReader { + fn read(&mut self, into: &mut [u8]) -> io::Result { + self.inner.read(into) + } +} + +impl DecoderReaderBuf { + /// Creates a new decoder from the given reader, immediately parsing the + /// gzip header. + /// + /// If an error is encountered when parsing the gzip header, an error is + /// returned. + pub fn new(r: R) -> io::Result> { + let mut crc_reader = CrcReader::new(r); + let mut header = [0; 10]; + try!(crc_reader.read_exact(&mut header)); + + let id1 = header[0]; + let id2 = header[1]; + if id1 != 0x1f || id2 != 0x8b { + return Err(bad_header()); + } + let cm = header[2]; + if cm != 8 { + return Err(bad_header()); + } + + let flg = header[3]; + let mtime = ((header[4] as u32) << 0) | ((header[5] as u32) << 8) | + ((header[6] as u32) << 16) | + ((header[7] as u32) << 24); + let _xfl = header[8]; + let _os = header[9]; + + let extra = if flg & FEXTRA != 0 { + let xlen = try!(read_le_u16(&mut crc_reader)); + let mut extra = vec![0; xlen as usize]; + try!(crc_reader.read_exact(&mut extra)); + Some(extra) + } else { + None + }; + let filename = if flg & FNAME != 0 { + // wow this is slow + let mut b = Vec::new(); + for byte in crc_reader.by_ref().bytes() { + let byte = try!(byte); + if byte == 0 { + break; + } + b.push(byte); + } + Some(b) + } else { + None + }; + let comment = if flg & FCOMMENT != 0 { + // wow this is slow + let mut b = 
Vec::new(); + for byte in crc_reader.by_ref().bytes() { + let byte = try!(byte); + if byte == 0 { + break; + } + b.push(byte); + } + Some(b) + } else { + None + }; + + if flg & FHCRC != 0 { + let calced_crc = crc_reader.crc().sum() as u16; + let stored_crc = try!(read_le_u16(&mut crc_reader)); + if calced_crc != stored_crc { + return Err(corrupt()); + } + } + + let flate = deflate::DecoderReaderBuf::new(crc_reader.into_inner()); + return Ok(DecoderReaderBuf { + inner: CrcReader::new(flate), + header: Header { + extra: extra, + filename: filename, + comment: comment, + mtime: mtime, + }, + finished: false, + }); + + fn bad_header() -> io::Error { + io::Error::new(io::ErrorKind::InvalidInput, "invalid gzip header") + } + + fn read_le_u16(r: &mut R) -> io::Result { + let mut b = [0; 2]; + try!(r.read_exact(&mut b)); + Ok((b[0] as u16) | ((b[1] as u16) << 8)) + } + } + + /// Returns the header associated with this stream. + pub fn header(&self) -> &Header { + &self.header + } + + fn finish(&mut self) -> io::Result<()> { + if self.finished { + return Ok(()); + } + let ref mut buf = [0u8; 8]; + { + let mut len = 0; + + while len < buf.len() { + match try!(self.inner.inner().get_mut().read(&mut buf[len..])) { + 0 => return Err(corrupt()), + n => len += n, + } + } + } + + let crc = ((buf[0] as u32) << 0) | ((buf[1] as u32) << 8) | + ((buf[2] as u32) << 16) | + ((buf[3] as u32) << 24); + let amt = ((buf[4] as u32) << 0) | ((buf[5] as u32) << 8) | + ((buf[6] as u32) << 16) | + ((buf[7] as u32) << 24); + if crc != self.inner.crc().sum() as u32 { + return Err(corrupt()); + } + if amt != self.inner.crc().amt_as_u32() { + return Err(corrupt()); + } + self.finished = true; + Ok(()) + } +} + +impl Read for DecoderReaderBuf { + fn read(&mut self, into: &mut [u8]) -> io::Result { + match try!(self.inner.read(into)) { + 0 => { + try!(self.finish()); + Ok(0) + } + n => Ok(n), + } + } +} + +impl Header { + /// Returns the `filename` field of this gzip stream's header, if present. 
+ pub fn filename(&self) -> Option<&[u8]> { + self.filename.as_ref().map(|s| &s[..]) + } + + /// Returns the `extra` field of this gzip stream's header, if present. + pub fn extra(&self) -> Option<&[u8]> { + self.extra.as_ref().map(|s| &s[..]) + } + + /// Returns the `comment` field of this gzip stream's header, if present. + pub fn comment(&self) -> Option<&[u8]> { + self.comment.as_ref().map(|s| &s[..]) + } + + /// Returns the `mtime` field of this gzip stream's header, if present. + pub fn mtime(&self) -> u32 { + self.mtime + } +} + +fn corrupt() -> io::Error { + io::Error::new(io::ErrorKind::InvalidInput, + "corrupt gzip stream does not have a matching checksum") +} + +#[cfg(test)] +mod tests { + use std::io::prelude::*; + + use super::{EncoderWriter, EncoderReader, DecoderReader, Builder}; + use Compression::Default; + use rand::{thread_rng, Rng}; + + #[test] + fn roundtrip() { + let mut e = EncoderWriter::new(Vec::new(), Default); + e.write_all(b"foo bar baz").unwrap(); + let inner = e.finish().unwrap(); + let mut d = DecoderReader::new(&inner[..]).unwrap(); + let mut s = String::new(); + d.read_to_string(&mut s).unwrap(); + assert_eq!(s, "foo bar baz"); + } + + #[test] + fn roundtrip_zero() { + let e = EncoderWriter::new(Vec::new(), Default); + let inner = e.finish().unwrap(); + let mut d = DecoderReader::new(&inner[..]).unwrap(); + let mut s = String::new(); + d.read_to_string(&mut s).unwrap(); + assert_eq!(s, ""); + } + + #[test] + fn roundtrip_big() { + let mut real = Vec::new(); + let mut w = EncoderWriter::new(Vec::new(), Default); + let v = thread_rng().gen_iter::().take(1024).collect::>(); + for _ in 0..200 { + let to_write = &v[..thread_rng().gen_range(0, v.len())]; + real.extend(to_write.iter().map(|x| *x)); + w.write_all(to_write).unwrap(); + } + let result = w.finish().unwrap(); + let mut r = DecoderReader::new(&result[..]).unwrap(); + let mut v = Vec::new(); + r.read_to_end(&mut v).unwrap(); + assert!(v == real); + } + + #[test] + fn 
roundtrip_big2() { + let v = thread_rng() + .gen_iter::() + .take(1024 * 1024) + .collect::>(); + let mut r = DecoderReader::new(EncoderReader::new(&v[..], Default)) + .unwrap(); + let mut res = Vec::new(); + r.read_to_end(&mut res).unwrap(); + assert!(res == v); + } + + #[test] + fn fields() { + let r = vec![0, 2, 4, 6]; + let e = Builder::new() + .filename(b"foo.rs") + .comment(b"bar") + .extra(vec![0, 1, 2, 3]) + .read(&r[..], Default); + let mut d = DecoderReader::new(e).unwrap(); + assert_eq!(d.header().filename(), Some(&b"foo.rs"[..])); + assert_eq!(d.header().comment(), Some(&b"bar"[..])); + assert_eq!(d.header().extra(), Some(&b"\x00\x01\x02\x03"[..])); + let mut res = Vec::new(); + d.read_to_end(&mut res).unwrap(); + assert_eq!(res, vec![0, 2, 4, 6]); + + } + + #[test] + fn keep_reading_after_end() { + let mut e = EncoderWriter::new(Vec::new(), Default); + e.write_all(b"foo bar baz").unwrap(); + let inner = e.finish().unwrap(); + let mut d = DecoderReader::new(&inner[..]).unwrap(); + let mut s = String::new(); + d.read_to_string(&mut s).unwrap(); + assert_eq!(s, "foo bar baz"); + d.read_to_string(&mut s).unwrap(); + assert_eq!(s, "foo bar baz"); + } + + #[test] + fn qc_reader() { + ::quickcheck::quickcheck(test as fn(_) -> _); + + fn test(v: Vec) -> bool { + let r = EncoderReader::new(&v[..], Default); + let mut r = DecoderReader::new(r).unwrap(); + let mut v2 = Vec::new(); + r.read_to_end(&mut v2).unwrap(); + v == v2 + } + } + + #[test] + fn flush_after_write() { + let mut f = EncoderWriter::new(Vec::new(), Default); + write!(f, "Hello world").unwrap(); + f.flush().unwrap(); + } +} diff -Nru cargo-0.17.0/vendor/flate2-0.2.17/src/lib.rs cargo-0.19.0/vendor/flate2-0.2.17/src/lib.rs --- cargo-0.17.0/vendor/flate2-0.2.17/src/lib.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/flate2-0.2.17/src/lib.rs 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,213 @@ +//! A DEFLATE-based stream compression/decompression library +//! +//! 
This library is meant to supplement/replace the standard distributon's +//! libflate library by providing a streaming encoder/decoder rather than purely +//! an in-memory encoder/decoder. +//! +//! Like with libflate, flate2 is based on [`miniz.c`][1] +//! +//! [1]: https://code.google.com/p/miniz/ +//! +//! # Organization +//! +//! This crate consists mainly of two modules, `read` and `write`. Each +//! module contains a number of types used to encode and decode various streams +//! of data. All types in the `write` module work on instances of `Write`, +//! whereas all types in the `read` module work on instances of `Read`. +//! +//! Other various types are provided at the top-level of the crate for +//! management and dealing with encoders/decoders. +//! +//! # Helper traits +//! +//! There are two helper traits provided: `FlateReader` and `FlateWriter`. +//! These provide convenience methods for creating a decoder/encoder out of an +//! already existing stream to chain construction. + +#![doc(html_root_url = "https://docs.rs/flate2/0.2")] +#![deny(missing_docs)] +#![allow(trivial_numeric_casts)] +#![cfg_attr(test, deny(warnings))] + +extern crate libc; +#[cfg(test)] +extern crate rand; +#[cfg(test)] +extern crate quickcheck; + +use std::io::prelude::*; +use std::io; + +pub use gz::Builder as GzBuilder; +pub use gz::Header as GzHeader; +pub use mem::{Compress, Decompress, DataError, Status, Flush}; + +mod bufreader; +mod crc; +mod deflate; +mod ffi; +mod gz; +mod zio; +mod mem; +mod zlib; + +/// Types which operate over `Read` streams, both encoders and decoders for +/// various formats. 
+pub mod read { + pub use deflate::EncoderReader as DeflateEncoder; + pub use deflate::DecoderReader as DeflateDecoder; + pub use zlib::EncoderReader as ZlibEncoder; + pub use zlib::DecoderReader as ZlibDecoder; + pub use gz::EncoderReader as GzEncoder; + pub use gz::DecoderReader as GzDecoder; +} + +/// Types which operate over `Write` streams, both encoders and decoders for +/// various formats. +pub mod write { + pub use deflate::EncoderWriter as DeflateEncoder; + pub use deflate::DecoderWriter as DeflateDecoder; + pub use zlib::EncoderWriter as ZlibEncoder; + pub use zlib::DecoderWriter as ZlibDecoder; + pub use gz::EncoderWriter as GzEncoder; +} + +/// Types which operate over `BufRead` streams, both encoders and decoders for +/// various formats. +pub mod bufread { + pub use deflate::EncoderReaderBuf as DeflateEncoder; + pub use deflate::DecoderReaderBuf as DeflateDecoder; + pub use zlib::EncoderReaderBuf as ZlibEncoder; + pub use zlib::DecoderReaderBuf as ZlibDecoder; + pub use gz::EncoderReaderBuf as GzEncoder; + pub use gz::DecoderReaderBuf as GzDecoder; +} + +fn _assert_send_sync() { + fn _assert_send_sync() {} + + _assert_send_sync::>(); + _assert_send_sync::>(); + _assert_send_sync::>(); + _assert_send_sync::>(); + _assert_send_sync::>(); + _assert_send_sync::>(); + _assert_send_sync::>>(); + _assert_send_sync::>>(); + _assert_send_sync::>>(); + _assert_send_sync::>>(); + _assert_send_sync::>>(); +} + +/// When compressing data, the compression level can be specified by a value in +/// this enum. +#[derive(Copy, Clone)] +pub enum Compression { + /// No compression is to be performed, this may actually inflate data + /// slightly when encoding. + None = 0, + /// Optimize for the best speed of encoding. + Fast = 1, + /// Optimize for the size of data being encoded. + Best = 9, + /// Choose the default compression, a balance between speed and size. + Default = 6, +} + +/// A helper trait to create encoder/decoders with method syntax. 
+pub trait FlateReadExt: Read + Sized { + /// Consume this reader to create a compression stream at the specified + /// compression level. + fn gz_encode(self, lvl: Compression) -> read::GzEncoder { + read::GzEncoder::new(self, lvl) + } + + /// Consume this reader to create a decompression stream of this stream. + fn gz_decode(self) -> io::Result> { + read::GzDecoder::new(self) + } + + /// Consume this reader to create a compression stream at the specified + /// compression level. + fn zlib_encode(self, lvl: Compression) -> read::ZlibEncoder { + read::ZlibEncoder::new(self, lvl) + } + + /// Consume this reader to create a decompression stream of this stream. + fn zlib_decode(self) -> read::ZlibDecoder { + read::ZlibDecoder::new(self) + } + + /// Consume this reader to create a compression stream at the specified + /// compression level. + fn deflate_encode(self, lvl: Compression) -> read::DeflateEncoder { + read::DeflateEncoder::new(self, lvl) + } + + /// Consume this reader to create a decompression stream of this stream. + fn deflate_decode(self) -> read::DeflateDecoder { + read::DeflateDecoder::new(self) + } +} + +/// A helper trait to create encoder/decoders with method syntax. +pub trait FlateWriteExt: Write + Sized { + /// Consume this writer to create a compression stream at the specified + /// compression level. + fn gz_encode(self, lvl: Compression) -> write::GzEncoder { + write::GzEncoder::new(self, lvl) + } + + // TODO: coming soon to a theater near you! + // /// Consume this writer to create a decompression stream of this stream. + // fn gz_decode(self) -> IoResult> { + // write::GzDecoder::new(self) + // } + + /// Consume this writer to create a compression stream at the specified + /// compression level. + fn zlib_encode(self, lvl: Compression) -> write::ZlibEncoder { + write::ZlibEncoder::new(self, lvl) + } + + /// Consume this writer to create a decompression stream of this stream. 
+ fn zlib_decode(self) -> write::ZlibDecoder { + write::ZlibDecoder::new(self) + } + + /// Consume this writer to create a compression stream at the specified + /// compression level. + fn deflate_encode(self, lvl: Compression) -> write::DeflateEncoder { + write::DeflateEncoder::new(self, lvl) + } + + /// Consume this writer to create a decompression stream of this stream. + fn deflate_decode(self) -> write::DeflateDecoder { + write::DeflateDecoder::new(self) + } +} + +impl FlateReadExt for T {} +impl FlateWriteExt for T {} + +#[cfg(test)] +mod test { + use std::io::prelude::*; + use {FlateReadExt, Compression}; + + #[test] + fn crazy() { + let rdr = &mut b"foobar"; + let mut res = Vec::new(); + rdr.gz_encode(Compression::Default) + .deflate_encode(Compression::Default) + .zlib_encode(Compression::Default) + .zlib_decode() + .deflate_decode() + .gz_decode() + .unwrap() + .read_to_end(&mut res) + .unwrap(); + assert_eq!(res, b"foobar"); + } +} diff -Nru cargo-0.17.0/vendor/flate2-0.2.17/src/mem.rs cargo-0.19.0/vendor/flate2-0.2.17/src/mem.rs --- cargo-0.17.0/vendor/flate2-0.2.17/src/mem.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/flate2-0.2.17/src/mem.rs 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,434 @@ +use std::error::Error; +use std::fmt; +use std::io; +use std::marker; +use std::mem; +use std::slice; + +use libc::{c_int, c_uint}; + +use Compression; +use ffi; + +/// Raw in-memory compression stream for blocks of data. +/// +/// This type is the building block for the I/O streams in the rest of this +/// crate. It requires more management than the `Read`/`Write` API but is +/// maximally flexible in terms of accepting input from any source and being +/// able to produce output to any memory location. +/// +/// It is recommended to use the I/O stream adaptors over this type as they're +/// easier to use. +pub struct Compress { + inner: Stream, +} + +/// Raw in-memory decompression stream for blocks of data. 
+/// +/// This type is the building block for the I/O streams in the rest of this +/// crate. It requires more management than the `Read`/`Write` API but is +/// maximally flexible in terms of accepting input from any source and being +/// able to produce output to any memory location. +/// +/// It is recommended to use the I/O stream adaptors over this type as they're +/// easier to use. +pub struct Decompress { + inner: Stream, +} + +struct Stream { + raw: ffi::mz_stream, + total_in: u64, + total_out: u64, + _marker: marker::PhantomData, +} + +unsafe impl Send for Stream {} +unsafe impl Sync for Stream {} + +trait Direction { + unsafe fn destroy(stream: *mut ffi::mz_stream) -> c_int; +} + +enum DirCompress {} +enum DirDecompress {} + +/// Values which indicate the form of flushing to be used when compressing or +/// decompressing in-memory data. +pub enum Flush { + /// A typical parameter for passing to compression/decompression functions, + /// this indicates that the underlying stream to decide how much data to + /// accumulate before producing output in order to maximize compression. + None = ffi::MZ_NO_FLUSH as isize, + + /// All pending output is flushed to the output buffer and the output is + /// aligned on a byte boundary so that the decompressor can get all input + /// data available so far. + /// + /// Flushing may degrade compression for some compression algorithms and so + /// it should only be used when necessary. This will complete the current + /// deflate block and follow it with an empty stored block. + Sync = ffi::MZ_SYNC_FLUSH as isize, + + /// All pending output is flushed to the output buffer, but the output is + /// not aligned to a byte boundary. + /// + /// All of the input data so far will be available to the decompressor (as + /// with `Flush::Sync`. 
This completes the current deflate block and follows + /// it with an empty fixed codes block that is 10 bites long, and it assures + /// that enough bytes are output in order for the decompessor to finish the + /// block before the empty fixed code block. + Partial = ffi::MZ_PARTIAL_FLUSH as isize, + + /// A deflate block is completed and emitted, as for `Flush::Sync`, but the + /// output is not aligned on a byte boundary and up to seven vits of the + /// current block are held to be written as the next byte after the next + /// deflate block is completed. + /// + /// In this case the decompressor may not be provided enough bits at this + /// point in order to complete decompression of the data provided so far to + /// the compressor, it may need to wait for the next block to be emitted. + /// This is for advanced applications that need to control the emission of + /// deflate blocks. + Block = ffi::MZ_BLOCK as isize, + + /// All output is flushed as with `Flush::Sync` and the compression state is + /// reset so decompression can restart from this point if previous + /// compressed data has been damaged or if random access is desired. + /// + /// Using this option too often can seriously degrade compression. + Full = ffi::MZ_FULL_FLUSH as isize, + + /// Pending input is processed and pending output is flushed. + /// + /// The return value may indicate that the stream is not yet done and more + /// data has yet to be processed. + Finish = ffi::MZ_FINISH as isize, +} + +/// Error returned when a decompression object finds that the input stream of +/// bytes was not a valid input stream of bytes. +#[derive(Debug)] +pub struct DataError(()); + +/// Possible status results of compressing some data or successfully +/// decompressing a block of data. +pub enum Status { + /// Indicates success. + /// + /// Means that more input may be needed but isn't available + /// and/or there' smore output to be written but the output buffer is full. 
+ Ok, + + /// Indicates that forward progress is not possible due to input or output + /// buffers being empty. + /// + /// For compression it means the input buffer needs some more data or the + /// output buffer needs to be freed up before trying again. + /// + /// For decompression this means that more input is needed to continue or + /// the output buffer isn't large enough to contain the result. The function + /// can be called again after fixing both. + BufError, + + /// Indicates that all input has been consumed and all output bytes have + /// been written. Decompression/compression should not be called again. + /// + /// For decompression with zlib streams the adler-32 of the decompressed + /// data has also been verified. + StreamEnd, +} + +impl Compress { + /// Creates a new object ready for compressing data that it's given. + /// + /// The `level` argument here indicates what level of compression is going + /// to be performed, and the `zlib_header` argument indicates whether the + /// output data should have a zlib header or not. + pub fn new(level: Compression, zlib_header: bool) -> Compress { + unsafe { + let mut state: ffi::mz_stream = mem::zeroed(); + let ret = ffi::mz_deflateInit2(&mut state, + level as c_int, + ffi::MZ_DEFLATED, + if zlib_header { + ffi::MZ_DEFAULT_WINDOW_BITS + } else { + -ffi::MZ_DEFAULT_WINDOW_BITS + }, + 9, + ffi::MZ_DEFAULT_STRATEGY); + debug_assert_eq!(ret, 0); + Compress { + inner: Stream { + raw: state, + total_in: 0, + total_out: 0, + _marker: marker::PhantomData, + }, + } + } + } + + /// Returns the total number of input bytes which have been processed by + /// this compression object. + pub fn total_in(&self) -> u64 { + self.inner.total_in + } + + /// Returns the total number of output bytes which have been produced by + /// this compression object. + pub fn total_out(&self) -> u64 { + self.inner.total_out + } + + /// Quickly resets this compressor without having to reallocate anything. 
+ /// + /// This is equivalent to dropping this object and then creating a new one. + pub fn reset(&mut self) { + let rc = unsafe { ffi::mz_deflateReset(&mut self.inner.raw) }; + assert_eq!(rc, ffi::MZ_OK); + + self.inner.total_in = 0; + self.inner.total_out = 0; + } + + /// Compresses the input data into the output, consuming only as much + /// input as needed and writing as much output as possible. + /// + /// The flush option can be any of the available flushing parameters. + /// + /// To learn how much data was consumed or how much output was produced, use + /// the `total_in` and `total_out` functions before/after this is called. + pub fn compress(&mut self, + input: &[u8], + output: &mut [u8], + flush: Flush) + -> Status { + self.inner.raw.next_in = input.as_ptr() as *mut _; + self.inner.raw.avail_in = input.len() as c_uint; + self.inner.raw.next_out = output.as_mut_ptr(); + self.inner.raw.avail_out = output.len() as c_uint; + + let rc = unsafe { ffi::mz_deflate(&mut self.inner.raw, flush as c_int) }; + + // Unfortunately the total counters provided by zlib might be only + // 32 bits wide and overflow while processing large amounts of data. + self.inner.total_in += (self.inner.raw.next_in as usize - + input.as_ptr() as usize) as u64; + self.inner.total_out += (self.inner.raw.next_out as usize - + output.as_ptr() as usize) as u64; + + match rc { + ffi::MZ_OK => Status::Ok, + ffi::MZ_BUF_ERROR => Status::BufError, + ffi::MZ_STREAM_END => Status::StreamEnd, + c => panic!("unknown return code: {}", c), + } + } + + /// Compresses the input data into the extra space of the output, consuming + /// only as much input as needed and writing as much output as possible. + /// + /// This function has the same semantics as `compress`, except that the + /// length of `vec` is managed by this function. 
This will not reallocate + /// the vector provided or attempt to grow it, so space for the output must + /// be reserved in the output vector by the caller before calling this + /// function. + pub fn compress_vec(&mut self, + input: &[u8], + output: &mut Vec, + flush: Flush) + -> Status { + let cap = output.capacity(); + let len = output.len(); + + unsafe { + let before = self.total_out(); + let ret = { + let ptr = output.as_mut_ptr().offset(len as isize); + let out = slice::from_raw_parts_mut(ptr, cap - len); + self.compress(input, out, flush) + }; + output.set_len((self.total_out() - before) as usize + len); + return ret + } + } +} + +impl Decompress { + /// Creates a new object ready for decompressing data that it's given. + /// + /// The `zlib_header` argument indicates whether the input data is expected + /// to have a zlib header or not. + pub fn new(zlib_header: bool) -> Decompress { + unsafe { + let mut state: ffi::mz_stream = mem::zeroed(); + let ret = ffi::mz_inflateInit2(&mut state, + if zlib_header { + ffi::MZ_DEFAULT_WINDOW_BITS + } else { + -ffi::MZ_DEFAULT_WINDOW_BITS + }); + debug_assert_eq!(ret, 0); + Decompress { + inner: Stream { + raw: state, + total_in: 0, + total_out: 0, + _marker: marker::PhantomData, + }, + } + } + } + + /// Returns the total number of input bytes which have been processed by + /// this decompression object. + pub fn total_in(&self) -> u64 { + self.inner.total_in + } + + /// Returns the total number of output bytes which have been produced by + /// this decompression object. + pub fn total_out(&self) -> u64 { + self.inner.total_out + } + + /// Decompresses the input data into the output, consuming only as much + /// input as needed and writing as much output as possible. + /// + /// The flush option provided can either be `Flush::None`, `Flush::Sync`, + /// or `Flush::Finish`. 
If the first call passes `Flush::Finish` it is + /// assumed that the input and output buffers are both sized large enough to + /// decompress the entire stream in a single call. + /// + /// A flush value of `Flush::Finish` indicates that there are no more source + /// bytes available beside what's already in the input buffer, and the + /// output buffer is large enough to hold the rest of the decompressed data. + /// + /// To learn how much data was consumed or how much output was produced, use + /// the `total_in` and `total_out` functions before/after this is called. + pub fn decompress(&mut self, + input: &[u8], + output: &mut [u8], + flush: Flush) + -> Result { + self.inner.raw.next_in = input.as_ptr() as *mut u8; + self.inner.raw.avail_in = input.len() as c_uint; + self.inner.raw.next_out = output.as_mut_ptr(); + self.inner.raw.avail_out = output.len() as c_uint; + + let rc = unsafe { ffi::mz_inflate(&mut self.inner.raw, flush as c_int) }; + + // Unfortunately the total counters provided by zlib might be only + // 32 bits wide and overflow while processing large amounts of data. + self.inner.total_in += (self.inner.raw.next_in as usize - + input.as_ptr() as usize) as u64; + self.inner.total_out += (self.inner.raw.next_out as usize - + output.as_ptr() as usize) as u64; + + match rc { + ffi::MZ_DATA_ERROR | + ffi::MZ_STREAM_ERROR => Err(DataError(())), + ffi::MZ_OK => Ok(Status::Ok), + ffi::MZ_BUF_ERROR => Ok(Status::BufError), + ffi::MZ_STREAM_END => Ok(Status::StreamEnd), + c => panic!("unknown return code: {}", c), + } + } + + /// Decompresses the input data into the extra space in the output vector + /// specified by `output`. + /// + /// This function has the same semantics as `decompress`, except that the + /// length of `vec` is managed by this function. This will not reallocate + /// the vector provided or attempt to grow it, so space for the output must + /// be reserved in the output vector by the caller before calling this + /// function. 
+ pub fn decompress_vec(&mut self, + input: &[u8], + output: &mut Vec, + flush: Flush) + -> Result { + let cap = output.capacity(); + let len = output.len(); + + unsafe { + let before = self.total_out(); + let ret = { + let ptr = output.as_mut_ptr().offset(len as isize); + let out = slice::from_raw_parts_mut(ptr, cap - len); + self.decompress(input, out, flush) + }; + output.set_len((self.total_out() - before) as usize + len); + return ret + } + } +} + +impl Error for DataError { + fn description(&self) -> &str { "deflate data error" } +} + +impl From for io::Error { + fn from(data: DataError) -> io::Error { + io::Error::new(io::ErrorKind::Other, data) + } +} + +impl fmt::Display for DataError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + self.description().fmt(f) + } +} + +impl Direction for DirCompress { + unsafe fn destroy(stream: *mut ffi::mz_stream) -> c_int { + ffi::mz_deflateEnd(stream) + } +} +impl Direction for DirDecompress { + unsafe fn destroy(stream: *mut ffi::mz_stream) -> c_int { + ffi::mz_inflateEnd(stream) + } +} + +impl Drop for Stream { + fn drop(&mut self) { + unsafe { + let _ = D::destroy(&mut self.raw); + } + } +} + +#[cfg(test)] +mod tests { + use {Decompress, Flush}; + + #[test] + fn issue51() { + let data = vec![ + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0xb3, 0xc9, + 0x28, 0xc9, 0xcd, 0xb1, 0xe3, 0xe5, 0xb2, 0xc9, 0x48, 0x4d, 0x4c, 0xb1, + 0xb3, 0x29, 0xc9, 0x2c, 0xc9, 0x49, 0xb5, 0x33, 0x31, 0x30, 0x51, 0xf0, + 0xcb, 0x2f, 0x51, 0x70, 0xcb, 0x2f, 0xcd, 0x4b, 0xb1, 0xd1, 0x87, 0x08, + 0xda, 0xe8, 0x83, 0x95, 0x00, 0x95, 0x26, 0xe5, 0xa7, 0x54, 0x2a, 0x24, + 0xa5, 0x27, 0xe7, 0xe7, 0xe4, 0x17, 0xd9, 0x2a, 0x95, 0x67, 0x64, 0x96, + 0xa4, 0x2a, 0x81, 0x8c, 0x48, 0x4e, 0xcd, 0x2b, 0x49, 0x2d, 0xb2, 0xb3, + 0xc9, 0x30, 0x44, 0x37, 0x01, 0x28, 0x62, 0xa3, 0x0f, 0x95, 0x06, 0xd9, + 0x05, 0x54, 0x04, 0xe5, 0xe5, 0xa5, 0x67, 0xe6, 0x55, 0xe8, 0x1b, 0xea, + 0x99, 0xe9, 0x19, 0x21, 0xab, 0xd0, 0x07, 0xd9, 0x01, 
0x32, 0x53, 0x1f, + 0xea, 0x3e, 0x00, 0x94, 0x85, 0xeb, 0xe4, 0xa8, 0x00, 0x00, 0x00 + ]; + + let mut decoded = Vec::with_capacity(data.len()*2); + + let mut d = Decompress::new(false); + // decompressed whole deflate stream + assert!(d.decompress_vec(&data[10..], &mut decoded, Flush::Finish).is_ok()); + + // decompress data that has nothing to do with the deflate stream (this + // used to panic) + drop(d.decompress_vec(&[0], &mut decoded, Flush::None)); + } +} diff -Nru cargo-0.17.0/vendor/flate2-0.2.17/src/zio.rs cargo-0.19.0/vendor/flate2-0.2.17/src/zio.rs --- cargo-0.17.0/vendor/flate2-0.2.17/src/zio.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/flate2-0.2.17/src/zio.rs 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,187 @@ +use std::io::prelude::*; +use std::io; +use std::mem; + +use {Decompress, Compress, Status, Flush, DataError}; + +pub struct Writer { + obj: Option, + pub data: D, + buf: Vec, +} + +pub trait Ops { + fn total_in(&self) -> u64; + fn total_out(&self) -> u64; + fn run(&mut self, input: &[u8], output: &mut [u8], flush: Flush) + -> Result; + fn run_vec(&mut self, input: &[u8], output: &mut Vec, flush: Flush) + -> Result; +} + +impl Ops for Compress { + fn total_in(&self) -> u64 { self.total_in() } + fn total_out(&self) -> u64 { self.total_out() } + fn run(&mut self, input: &[u8], output: &mut [u8], flush: Flush) + -> Result { + Ok(self.compress(input, output, flush)) + } + fn run_vec(&mut self, input: &[u8], output: &mut Vec, flush: Flush) + -> Result { + Ok(self.compress_vec(input, output, flush)) + } +} + +impl Ops for Decompress { + fn total_in(&self) -> u64 { self.total_in() } + fn total_out(&self) -> u64 { self.total_out() } + fn run(&mut self, input: &[u8], output: &mut [u8], flush: Flush) + -> Result { + self.decompress(input, output, flush) + } + fn run_vec(&mut self, input: &[u8], output: &mut Vec, flush: Flush) + -> Result { + self.decompress_vec(input, output, flush) + } +} + +pub fn read(obj: &mut R, data: &mut D, 
dst: &mut [u8]) -> io::Result + where R: BufRead, D: Ops +{ + loop { + let (read, consumed, ret, eof); + { + let input = try!(obj.fill_buf()); + eof = input.is_empty(); + let before_out = data.total_out(); + let before_in = data.total_in(); + let flush = if eof {Flush::Finish} else {Flush::None}; + ret = data.run(input, dst, flush); + read = (data.total_out() - before_out) as usize; + consumed = (data.total_in() - before_in) as usize; + } + obj.consume(consumed); + + match ret { + // If we haven't ready any data and we haven't hit EOF yet, + // then we need to keep asking for more data because if we + // return that 0 bytes of data have been read then it will + // be interpreted as EOF. + Ok(Status::Ok) | + Ok(Status::BufError) if read == 0 && !eof && dst.len() > 0 => { + continue + } + Ok(Status::Ok) | + Ok(Status::BufError) | + Ok(Status::StreamEnd) => return Ok(read), + + Err(..) => return Err(io::Error::new(io::ErrorKind::InvalidInput, + "corrupt deflate stream")) + } + } +} + +impl Writer { + pub fn new(w: W, d: D) -> Writer { + Writer { + obj: Some(w), + data: d, + buf: Vec::with_capacity(32 * 1024), + } + } + + pub fn finish(&mut self) -> io::Result<()> { + loop { + try!(self.dump()); + + let before = self.data.total_out(); + try!(self.data.run_vec(&[], &mut self.buf, Flush::Finish)); + if before == self.data.total_out() { + return Ok(()) + } + } + } + + pub fn replace(&mut self, w: W) -> W { + self.buf.truncate(0); + mem::replace(&mut self.obj, Some(w)).unwrap() + } + + pub fn get_mut(&mut self) -> Option<&mut W> { + self.obj.as_mut() + } + + pub fn take_inner(&mut self) -> Option { + self.obj.take() + } + + pub fn into_inner(mut self) -> W { + self.take_inner().unwrap() + } + + fn dump(&mut self) -> io::Result<()> { + if self.buf.len() > 0 { + try!(self.obj.as_mut().unwrap().write_all(&self.buf)); + self.buf.truncate(0); + } + Ok(()) + } +} + +impl Write for Writer { + fn write(&mut self, buf: &[u8]) -> io::Result { + // miniz isn't guaranteed to actually 
write any of the buffer provided, + // it may be in a flushing mode where it's just giving us data before + // we're actually giving it any data. We don't want to spuriously return + // `Ok(0)` when possible as it will cause calls to write_all() to fail. + // As a result we execute this in a loop to ensure that we try our + // darndest to write the data. + loop { + try!(self.dump()); + + let before_in = self.data.total_in(); + let ret = self.data.run_vec(buf, &mut self.buf, Flush::None); + let written = (self.data.total_in() - before_in) as usize; + + if buf.len() > 0 && written == 0 && ret.is_ok() { + continue + } + return match ret { + Ok(Status::Ok) | + Ok(Status::BufError) | + Ok(Status::StreamEnd) => Ok(written), + + Err(..) => Err(io::Error::new(io::ErrorKind::InvalidInput, + "corrupt deflate stream")) + } + } + } + + fn flush(&mut self) -> io::Result<()> { + self.data.run_vec(&[], &mut self.buf, Flush::Sync).unwrap(); + + // Unfortunately miniz doesn't actually tell us when we're done with + // pulling out all the data from the internal stream. To remedy this we + // have to continually ask the stream for more memory until it doesn't + // give us a chunk of memory the same size as our own internal buffer, + // at which point we assume it's reached the end. + loop { + try!(self.dump()); + let before = self.data.total_out(); + self.data.run_vec(&[], &mut self.buf, Flush::None).unwrap(); + if before == self.data.total_out() { + break + } + } + + self.obj.as_mut().unwrap().flush() + } +} + +impl Drop for Writer { + fn drop(&mut self) { + if self.obj.is_some() { + let _ = self.finish(); + } + } +} diff -Nru cargo-0.17.0/vendor/flate2-0.2.17/src/zlib.rs cargo-0.19.0/vendor/flate2-0.2.17/src/zlib.rs --- cargo-0.17.0/vendor/flate2-0.2.17/src/zlib.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/flate2-0.2.17/src/zlib.rs 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,545 @@ +//! 
ZLIB compression and decompression of streams + +use std::io::prelude::*; +use std::io; +use std::mem; + +use bufreader::BufReader; +use zio; +use {Compress, Decompress}; + +/// A ZLIB encoder, or compressor. +/// +/// This structure implements a `Write` interface and takes a stream of +/// uncompressed data, writing the compressed data to the wrapped writer. +pub struct EncoderWriter { + inner: zio::Writer, +} + +/// A ZLIB encoder, or compressor. +/// +/// This structure implements a `Read` interface and will read uncompressed +/// data from an underlying stream and emit a stream of compressed data. +pub struct EncoderReader { + inner: EncoderReaderBuf>, +} + +/// A ZLIB encoder, or compressor. +/// +/// This structure implements a `BufRead` interface and will read uncompressed +/// data from an underlying stream and emit a stream of compressed data. +pub struct EncoderReaderBuf { + obj: R, + data: Compress, +} + +/// A ZLIB decoder, or decompressor. +/// +/// This structure implements a `Read` interface and takes a stream of +/// compressed data as input, providing the decompressed data when read from. +pub struct DecoderReader { + inner: DecoderReaderBuf>, +} + +/// A ZLIB decoder, or decompressor. +/// +/// This structure implements a `BufRead` interface and takes a stream of +/// compressed data as input, providing the decompressed data when read from. +pub struct DecoderReaderBuf { + obj: R, + data: Decompress, +} + +/// A ZLIB decoder, or decompressor. +/// +/// This structure implements a `Write` and will emit a stream of decompressed +/// data when fed a stream of compressed data. +pub struct DecoderWriter { + inner: zio::Writer, +} + +impl EncoderWriter { + /// Creates a new encoder which will write compressed data to the stream + /// given at the given compression level. + /// + /// When this encoder is dropped or unwrapped the final pieces of data will + /// be flushed. 
+ pub fn new(w: W, level: ::Compression) -> EncoderWriter { + EncoderWriter { + inner: zio::Writer::new(w, Compress::new(level, true)), + } + } + + /// Resets the state of this encoder entirely, swapping out the output + /// stream for another. + /// + /// This function will finish encoding the current stream into the current + /// output stream before swapping out the two output streams. If the stream + /// cannot be finished an error is returned. + /// + /// After the current stream has been finished, this will reset the internal + /// state of this encoder and replace the output stream with the one + /// provided, returning the previous output stream. Future data written to + /// this encoder will be the compressed into the stream `w` provided. + pub fn reset(&mut self, w: W) -> io::Result { + try!(self.inner.finish()); + self.inner.data.reset(); + Ok(self.inner.replace(w)) + } + + /// Consumes this encoder, flushing the output stream. + /// + /// This will flush the underlying data stream and then return the contained + /// writer if the flush succeeded. + pub fn finish(mut self) -> io::Result { + try!(self.inner.finish()); + Ok(self.inner.into_inner()) + } +} + +impl Write for EncoderWriter { + fn write(&mut self, buf: &[u8]) -> io::Result { + self.inner.write(buf) + } + + fn flush(&mut self) -> io::Result<()> { + self.inner.flush() + } +} + +impl EncoderReader { + /// Creates a new encoder which will read uncompressed data from the given + /// stream and emit the compressed stream. + pub fn new(r: R, level: ::Compression) -> EncoderReader { + EncoderReader { + inner: EncoderReaderBuf::new(BufReader::new(r), level), + } + } + + /// Resets the state of this encoder entirely, swapping out the input + /// stream for another. + /// + /// This function will reset the internal state of this encoder and replace + /// the input stream with the one provided, returning the previous input + /// stream. 
Future data read from this encoder will be the compressed + /// version of `r`'s data. + pub fn reset(&mut self, r: R) -> R { + self.inner.data.reset(); + self.inner.obj.reset(r) + } + + /// Acquires a reference to the underlying stream + pub fn get_ref(&self) -> &R { + self.inner.get_ref().get_ref() + } + + /// Acquires a mutable reference to the underlying stream + /// + /// Note that mutation of the stream may result in surprising results if + /// this encoder is continued to be used. + pub fn get_mut(&mut self) -> &mut R { + self.inner.get_mut().get_mut() + } + + /// Consumes this encoder, returning the underlying reader. + pub fn into_inner(self) -> R { + self.inner.into_inner().into_inner() + } +} + +impl Read for EncoderReader { + fn read(&mut self, buf: &mut [u8]) -> io::Result { + self.inner.read(buf) + } +} + +impl EncoderReaderBuf { + /// Creates a new encoder which will read uncompressed data from the given + /// stream and emit the compressed stream. + pub fn new(r: R, level: ::Compression) -> EncoderReaderBuf { + EncoderReaderBuf { + obj: r, + data: Compress::new(level, true), + } + } + + /// Resets the state of this encoder entirely, swapping out the input + /// stream for another. + /// + /// This function will reset the internal state of this encoder and replace + /// the input stream with the one provided, returning the previous input + /// stream. Future data read from this encoder will be the compressed + /// version of `r`'s data. + pub fn reset(&mut self, r: R) -> R { + self.data.reset(); + mem::replace(&mut self.obj, r) + } + + /// Acquires a reference to the underlying stream + pub fn get_ref(&self) -> &R { + &self.obj + } + + /// Acquires a mutable reference to the underlying stream + /// + /// Note that mutation of the stream may result in surprising results if + /// this encoder is continued to be used. + pub fn get_mut(&mut self) -> &mut R { + &mut self.obj + } + + /// Consumes this encoder, returning the underlying reader. 
+ pub fn into_inner(self) -> R { + self.obj + } +} + +impl Read for EncoderReaderBuf { + fn read(&mut self, buf: &mut [u8]) -> io::Result { + zio::read(&mut self.obj, &mut self.data, buf) + } +} + +impl DecoderReader { + /// Creates a new decoder which will decompress data read from the given + /// stream. + pub fn new(r: R) -> DecoderReader { + DecoderReader::new_with_buf(r, vec![0; 32 * 1024]) + } + + /// Same as `new`, but the intermediate buffer for data is specified. + /// + /// Note that the specified buffer will only be used up to its current + /// length. The buffer's capacity will also not grow over time. + pub fn new_with_buf(r: R, buf: Vec) -> DecoderReader { + DecoderReader { + inner: DecoderReaderBuf::new(BufReader::with_buf(buf, r)), + } + } + + /// Resets the state of this decoder entirely, swapping out the input + /// stream for another. + /// + /// This will reset the internal state of this decoder and replace the + /// input stream with the one provided, returning the previous input + /// stream. Future data read from this decoder will be the decompressed + /// version of `r`'s data. + pub fn reset(&mut self, r: R) -> R { + self.inner.data = Decompress::new(true); + self.inner.obj.reset(r) + } + + /// Acquires a reference to the underlying stream + pub fn get_ref(&self) -> &R { + self.inner.get_ref().get_ref() + } + + /// Acquires a mutable reference to the underlying stream + /// + /// Note that mutation of the stream may result in surprising results if + /// this encoder is continued to be used. + pub fn get_mut(&mut self) -> &mut R { + self.inner.get_mut().get_mut() + } + + /// Consumes this decoder, returning the underlying reader. + pub fn into_inner(self) -> R { + self.inner.into_inner().into_inner() + } + + /// Returns the number of bytes that the decompressor has consumed. + /// + /// Note that this will likely be smaller than what the decompressor + /// actually read from the underlying stream due to buffering. 
+ pub fn total_in(&self) -> u64 { + self.inner.total_in() + } + + /// Returns the number of bytes that the decompressor has produced. + pub fn total_out(&self) -> u64 { + self.inner.total_out() + } +} + +impl Read for DecoderReader { + fn read(&mut self, into: &mut [u8]) -> io::Result { + self.inner.read(into) + } +} + +impl DecoderReaderBuf { + /// Creates a new decoder which will decompress data read from the given + /// stream. + pub fn new(r: R) -> DecoderReaderBuf { + DecoderReaderBuf { + obj: r, + data: Decompress::new(true), + } + } + + /// Resets the state of this decoder entirely, swapping out the input + /// stream for another. + /// + /// This will reset the internal state of this decoder and replace the + /// input stream with the one provided, returning the previous input + /// stream. Future data read from this decoder will be the decompressed + /// version of `r`'s data. + pub fn reset(&mut self, r: R) -> R { + self.data = Decompress::new(true); + mem::replace(&mut self.obj, r) + } + + /// Acquires a reference to the underlying stream + pub fn get_ref(&self) -> &R { + &self.obj + } + + /// Acquires a mutable reference to the underlying stream + /// + /// Note that mutation of the stream may result in surprising results if + /// this encoder is continued to be used. + pub fn get_mut(&mut self) -> &mut R { + &mut self.obj + } + + /// Consumes this decoder, returning the underlying reader. + pub fn into_inner(self) -> R { + self.obj + } + + /// Returns the number of bytes that the decompressor has consumed. + /// + /// Note that this will likely be smaller than what the decompressor + /// actually read from the underlying stream due to buffering. + pub fn total_in(&self) -> u64 { + self.data.total_in() + } + + /// Returns the number of bytes that the decompressor has produced. 
+ pub fn total_out(&self) -> u64 { + self.data.total_out() + } +} + +impl Read for DecoderReaderBuf { + fn read(&mut self, into: &mut [u8]) -> io::Result { + zio::read(&mut self.obj, &mut self.data, into) + } +} + +impl DecoderWriter { + /// Creates a new decoder which will write uncompressed data to the stream. + /// + /// When this decoder is dropped or unwrapped the final pieces of data will + /// be flushed. + pub fn new(w: W) -> DecoderWriter { + DecoderWriter { + inner: zio::Writer::new(w, Decompress::new(true)), + } + } + + /// Resets the state of this decoder entirely, swapping out the output + /// stream for another. + /// + /// This will reset the internal state of this decoder and replace the + /// output stream with the one provided, returning the previous output + /// stream. Future data written to this decoder will be decompressed into + /// the output stream `w`. + pub fn reset(&mut self, w: W) -> io::Result { + try!(self.inner.finish()); + self.inner.data = Decompress::new(true); + Ok(self.inner.replace(w)) + } + + /// Consumes this encoder, flushing the output stream. + /// + /// This will flush the underlying data stream and then return the contained + /// writer if the flush succeeded. + pub fn finish(mut self) -> io::Result { + try!(self.inner.finish()); + Ok(self.inner.into_inner()) + } + + /// Returns the number of bytes that the decompressor has consumed for + /// decompression. + /// + /// Note that this will likely be smaller than the number of bytes + /// successfully written to this stream due to internal buffering. + pub fn total_in(&self) -> u64 { + self.inner.data.total_in() + } + + /// Returns the number of bytes that the decompressor has written to its + /// output stream. 
+ pub fn total_out(&self) -> u64 { + self.inner.data.total_out() + } +} + +impl Write for DecoderWriter { + fn write(&mut self, buf: &[u8]) -> io::Result { + self.inner.write(buf) + } + + fn flush(&mut self) -> io::Result<()> { + self.inner.flush() + } +} + +#[cfg(test)] +mod tests { + use std::io::prelude::*; + use std::io; + + use rand::{thread_rng, Rng}; + + use zlib::{EncoderWriter, EncoderReader, DecoderReader, DecoderWriter}; + use Compression::Default; + + #[test] + fn roundtrip() { + let mut real = Vec::new(); + let mut w = EncoderWriter::new(Vec::new(), Default); + let v = thread_rng().gen_iter::().take(1024).collect::>(); + for _ in 0..200 { + let to_write = &v[..thread_rng().gen_range(0, v.len())]; + real.extend(to_write.iter().map(|x| *x)); + w.write_all(to_write).unwrap(); + } + let result = w.finish().unwrap(); + let mut r = DecoderReader::new(&result[..]); + let mut ret = Vec::new(); + r.read_to_end(&mut ret).unwrap(); + assert!(ret == real); + } + + #[test] + fn drop_writes() { + let mut data = Vec::new(); + EncoderWriter::new(&mut data, Default).write_all(b"foo").unwrap(); + let mut r = DecoderReader::new(&data[..]); + let mut ret = Vec::new(); + r.read_to_end(&mut ret).unwrap(); + assert!(ret == b"foo"); + } + + #[test] + fn total_in() { + let mut real = Vec::new(); + let mut w = EncoderWriter::new(Vec::new(), Default); + let v = thread_rng().gen_iter::().take(1024).collect::>(); + for _ in 0..200 { + let to_write = &v[..thread_rng().gen_range(0, v.len())]; + real.extend(to_write.iter().map(|x| *x)); + w.write_all(to_write).unwrap(); + } + let mut result = w.finish().unwrap(); + + let result_len = result.len(); + + for _ in 0..200 { + result.extend(v.iter().map(|x| *x)); + } + + let mut r = DecoderReader::new(&result[..]); + let mut ret = Vec::new(); + r.read_to_end(&mut ret).unwrap(); + assert!(ret == real); + assert_eq!(r.total_in(), result_len as u64); + } + + #[test] + fn roundtrip2() { + let v = thread_rng() + .gen_iter::() + .take(1024 * 
1024) + .collect::>(); + let mut r = DecoderReader::new(EncoderReader::new(&v[..], Default)); + let mut ret = Vec::new(); + r.read_to_end(&mut ret).unwrap(); + assert_eq!(ret, v); + } + + #[test] + fn roundtrip3() { + let v = thread_rng() + .gen_iter::() + .take(1024 * 1024) + .collect::>(); + let mut w = EncoderWriter::new(DecoderWriter::new(Vec::new()), Default); + w.write_all(&v).unwrap(); + let w = w.finish().unwrap().finish().unwrap(); + assert!(w == v); + } + + #[test] + fn reset_decoder() { + let v = thread_rng() + .gen_iter::() + .take(1024 * 1024) + .collect::>(); + let mut w = EncoderWriter::new(Vec::new(), Default); + w.write_all(&v).unwrap(); + let data = w.finish().unwrap(); + + { + let (mut a, mut b, mut c) = (Vec::new(), Vec::new(), Vec::new()); + let mut r = DecoderReader::new(&data[..]); + r.read_to_end(&mut a).unwrap(); + r.reset(&data); + r.read_to_end(&mut b).unwrap(); + + let mut r = DecoderReader::new(&data[..]); + r.read_to_end(&mut c).unwrap(); + assert!(a == b && b == c && c == v); + } + + { + let mut w = DecoderWriter::new(Vec::new()); + w.write_all(&data).unwrap(); + let a = w.reset(Vec::new()).unwrap(); + w.write_all(&data).unwrap(); + let b = w.finish().unwrap(); + + let mut w = DecoderWriter::new(Vec::new()); + w.write_all(&data).unwrap(); + let c = w.finish().unwrap(); + assert!(a == b && b == c && c == v); + } + } + + #[test] + fn bad_input() { + // regress tests: previously caused a panic on drop + let mut out: Vec = Vec::new(); + let data: Vec = (0..255).cycle().take(1024).collect(); + let mut w = DecoderWriter::new(&mut out); + match w.write_all(&data[..]) { + Ok(_) => panic!("Expected an error to be returned!"), + Err(e) => assert_eq!(e.kind(), io::ErrorKind::InvalidInput), + } + } + + #[test] + fn qc_reader() { + ::quickcheck::quickcheck(test as fn(_) -> _); + + fn test(v: Vec) -> bool { + let mut r = DecoderReader::new(EncoderReader::new(&v[..], Default)); + let mut v2 = Vec::new(); + r.read_to_end(&mut v2).unwrap(); + v == v2 
+ } + } + + #[test] + fn qc_writer() { + ::quickcheck::quickcheck(test as fn(_) -> _); + + fn test(v: Vec) -> bool { + let mut w = EncoderWriter::new(DecoderWriter::new(Vec::new()), Default); + w.write_all(&v).unwrap(); + v == w.finish().unwrap().finish().unwrap() + } + } +} Binary files /tmp/tmplxiqkI/U7kx02RAH_/cargo-0.17.0/vendor/flate2-0.2.17/tests/corrupt-file.gz and /tmp/tmplxiqkI/L4nBRNMLnv/cargo-0.19.0/vendor/flate2-0.2.17/tests/corrupt-file.gz differ Binary files /tmp/tmplxiqkI/U7kx02RAH_/cargo-0.17.0/vendor/flate2-0.2.17/tests/good-file.gz and /tmp/tmplxiqkI/L4nBRNMLnv/cargo-0.19.0/vendor/flate2-0.2.17/tests/good-file.gz differ diff -Nru cargo-0.17.0/vendor/flate2-0.2.17/tests/good-file.txt cargo-0.19.0/vendor/flate2-0.2.17/tests/good-file.txt --- cargo-0.17.0/vendor/flate2-0.2.17/tests/good-file.txt 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/flate2-0.2.17/tests/good-file.txt 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,733 @@ +## ## +timestep simulated EIR patent hosts +0 0.136402 16855 +1 0.146872 18564 +2 0.150157 20334 +3 0.146358 22159 +4 0.136315 23655 +5 0.122354 24848 +6 0.104753 25887 +7 0.084439 26770 +8 0.06417 27238 +9 0.0450397 27349 +10 0.0295473 27274 +11 0.0184662 26909 +12 0.0110032 26324 +13 0.00634348 25513 +14 0.0036144 24469 +15 0.00208133 23383 +16 0.00122468 22345 +17 0.000752514 21342 +18 0.000545333 20416 +19 0.000546139 19657 +20 0.00054572 18806 +21 0.000545757 18015 +22 0.000545898 17349 +23 0.000546719 16594 +24 0.000547353 15955 +25 0.000547944 15374 +26 0.000547606 14765 +27 0.000594773 14212 +28 0.000969163 13677 +29 0.00168295 13180 +30 0.003059 12760 +31 0.00571599 12313 +32 0.0107918 11896 +33 0.0201943 11512 +34 0.0368013 11340 +35 0.0640629 11323 +36 0.104447 11769 +37 0.157207 12728 +38 0.216682 14261 +39 0.271159 16491 +40 0.303552 19274 +41 0.303678 22157 +42 0.271945 24875 +43 0.215445 27027 +44 0.154503 28690 +45 0.100717 30046 +46 0.0600343 30602 +47 0.0328576 30709 +48 0.016964 30315 +49 
0.00841526 29310 +50 0.0040958 28058 +51 0.0019953 26662 +52 0.000986531 25259 +53 0.000545786 24049 +54 0.000546405 22966 +55 0.000546036 21933 +56 0.00054427 20953 +57 0.000542769 20057 +58 0.000541566 19304 +59 0.000541822 18477 +60 0.000541643 17695 +61 0.000541989 17002 +62 0.000769298 16391 +63 0.00150811 15805 +64 0.00295097 15172 +65 0.00566197 14690 +66 0.0105243 14206 +67 0.0186965 13791 +68 0.0313363 13470 +69 0.0490605 13377 +70 0.0711679 13631 +71 0.0953625 14209 +72 0.118026 15277 +73 0.134612 16760 +74 0.144311 18339 +75 0.146328 20124 +76 0.142936 21803 +77 0.134029 23435 +78 0.120562 24854 +79 0.103157 25880 +80 0.0834054 26597 +81 0.0632474 27226 +82 0.0447785 27294 +83 0.0295654 27169 +84 0.0184081 26803 +85 0.0109489 26265 +86 0.00631234 25375 +87 0.00359978 24306 +88 0.00206967 23260 +89 0.00122197 22225 +90 0.000751031 21277 +91 0.000544507 20295 +92 0.000543897 19417 +93 0.000543483 18623 +94 0.000542926 17837 +95 0.000542685 17070 +96 0.000542387 16424 +97 0.000541194 15838 +98 0.000540427 15177 +99 0.000540774 14608 +100 0.000588312 14066 +101 0.000959183 13499 +102 0.00166774 12979 +103 0.00303278 12545 +104 0.00567457 12067 +105 0.0107272 11712 +106 0.0200606 11368 +107 0.0364637 11207 +108 0.063339 11238 +109 0.103717 11660 +110 0.156884 12621 +111 0.217072 14151 +112 0.272311 16358 +113 0.305046 19005 +114 0.304927 21926 +115 0.272427 24662 +116 0.216478 27080 +117 0.155168 29064 +118 0.10079 30370 +119 0.0599659 30992 +120 0.0331287 30975 +121 0.017235 30317 +122 0.00860221 29455 +123 0.00419286 28172 +124 0.00203361 26809 +125 0.000998847 25476 +126 0.000551418 24230 +127 0.000551119 23106 +128 0.000552786 22147 +129 0.000553814 21183 +130 0.000553743 20280 +131 0.000554428 19423 +132 0.000555022 18598 +133 0.000555921 17864 +134 0.000556687 17187 +135 0.000789996 16527 +136 0.00154597 15870 +137 0.00302776 15226 +138 0.00581484 14685 +139 0.010812 14234 +140 0.0191832 13818 +141 0.0321572 13571 +142 0.050328 13538 +143 0.072817 13812 
+144 0.0974321 14368 +145 0.120225 15436 +146 0.137418 16988 +147 0.147086 18775 +148 0.149165 20563 +149 0.144943 22223 +150 0.136631 23741 +151 0.123355 24920 +152 0.105401 25779 +153 0.0851918 26781 +154 0.0641702 27265 +155 0.0450746 27505 +156 0.0294136 27416 +157 0.0183811 27028 +158 0.0109285 26260 +159 0.00634296 25451 +160 0.00364513 24472 +161 0.0021051 23427 +162 0.00123693 22403 +163 0.000759531 21393 +164 0.000551727 20485 +165 0.000552256 19660 +166 0.000552303 18862 +167 0.000550927 18094 +168 0.000551098 17378 +169 0.000551093 16691 +170 0.000551885 16050 +171 0.000552282 15420 +172 0.000552591 14878 +173 0.00060109 14357 +174 0.000980446 13768 +175 0.00170301 13241 +176 0.003096 12745 +177 0.00579971 12294 +178 0.010976 11879 +179 0.0205422 11636 +180 0.0374515 11431 +181 0.0649916 11517 +182 0.106008 11966 +183 0.159983 12918 +184 0.221127 14484 +185 0.276503 16696 +186 0.310316 19518 +187 0.311205 22301 +188 0.276769 25047 +189 0.220506 27360 +190 0.159123 29133 +191 0.103761 30440 +192 0.0613797 31087 +193 0.033583 31037 +194 0.0173275 30555 +195 0.00861968 29617 +196 0.00419503 28292 +197 0.00203304 26944 +198 0.00100126 25569 +199 0.000553511 24349 +200 0.000554687 23257 +201 0.00055586 22204 +202 0.000555419 21176 +203 0.000556032 20316 +204 0.000555974 19509 +205 0.000556859 18746 +206 0.000556996 17978 +207 0.000557102 17288 +208 0.000790187 16672 +209 0.00154711 16057 +210 0.00303521 15449 +211 0.00584201 14915 +212 0.0108854 14397 +213 0.0193386 14010 +214 0.0324346 13730 +215 0.0507192 13674 +216 0.0736661 13874 +217 0.0987887 14515 +218 0.122411 15693 +219 0.139964 17265 +220 0.149125 18894 +221 0.151434 20662 +222 0.148067 22442 +223 0.138894 24116 +224 0.125436 25367 +225 0.107664 26360 +226 0.0865709 27044 +227 0.0655588 27428 +228 0.0459664 27714 +229 0.0301384 27687 +230 0.0186481 27262 +231 0.01103 26677 +232 0.00636957 25722 +233 0.00366188 24662 +234 0.00212213 23575 +235 0.00125358 22520 +236 0.000768665 21480 +237 0.000556393 
20563 +238 0.000555892 19706 +239 0.00055534 18914 +240 0.000555027 18165 +241 0.000555062 17432 +242 0.000553766 16733 +243 0.000552984 16070 +244 0.000553634 15396 +245 0.000554286 14867 +246 0.000603759 14362 +247 0.000982974 13867 +248 0.00170532 13379 +249 0.00310471 12907 +250 0.00582577 12446 +251 0.0110122 12018 +252 0.0206284 11730 +253 0.0375835 11546 +254 0.0652192 11605 +255 0.10646 11981 +256 0.160858 12949 +257 0.223122 14478 +258 0.279678 16810 +259 0.312171 19452 +260 0.311778 22391 +261 0.276966 25204 +262 0.22251 27379 +263 0.159246 29248 +264 0.104109 30532 +265 0.0617903 30995 +266 0.0338421 31042 +267 0.0174647 30620 +268 0.00867821 29589 +269 0.00419968 28293 +270 0.00203244 26916 +271 0.00100204 25464 +272 0.000555586 24219 +273 0.000555599 23207 +274 0.00055582 22187 +275 0.00055516 21136 +276 0.000555436 20243 +277 0.000555618 19426 +278 0.000556778 18635 +279 0.000556976 17870 +280 0.000557162 17190 +281 0.0007904 16506 +282 0.00154557 15837 +283 0.00302973 15234 +284 0.00584543 14717 +285 0.0108796 14225 +286 0.0192919 13810 +287 0.032329 13605 +288 0.0505293 13536 +289 0.0733417 13760 +290 0.0982413 14378 +291 0.121477 15400 +292 0.138636 17017 +293 0.14875 18764 +294 0.150515 20516 +295 0.146372 22389 +296 0.137332 23975 +297 0.124076 25120 +298 0.106469 26137 +299 0.0862987 26973 +300 0.0650552 27584 +301 0.0456456 27741 +302 0.0300744 27565 +303 0.0187879 27212 +304 0.0112085 26432 +305 0.00648306 25501 +306 0.00370346 24466 +307 0.00213399 23472 +308 0.00125463 22415 +309 0.000765794 21427 +310 0.000552587 20533 +311 0.000553175 19632 +312 0.000553525 18831 +313 0.000554941 18119 +314 0.000556327 17336 +315 0.000556008 16721 +316 0.00055593 16086 +317 0.000556421 15516 +318 0.000557308 14918 +319 0.00060681 14402 +320 0.000990746 13849 +321 0.00172359 13355 +322 0.00313688 12902 +323 0.0058708 12425 +324 0.0110637 12087 +325 0.0206777 11743 +326 0.0376394 11531 +327 0.0656182 11582 +328 0.107414 12034 +329 0.162101 12955 +330 
0.223525 14571 +331 0.279935 16842 +332 0.314601 19566 +333 0.313556 22575 +334 0.279571 25279 +335 0.221638 27642 +336 0.158038 29275 +337 0.102505 30638 +338 0.0608328 31209 +339 0.0335531 31260 +340 0.0173332 30520 +341 0.00861545 29604 +342 0.00419454 28370 +343 0.00202587 26940 +344 0.000994029 25614 +345 0.000549339 24445 +346 0.000551477 23239 +347 0.000552891 22300 +348 0.000551775 21280 +349 0.000552425 20424 +350 0.000552135 19571 +351 0.000552542 18753 +352 0.000552863 18058 +353 0.000554438 17348 +354 0.000786735 16671 +355 0.00153958 16047 +356 0.00301482 15500 +357 0.00580589 14883 +358 0.0108227 14347 +359 0.0192357 13947 +360 0.0321613 13672 +361 0.050229 13606 +362 0.0729462 13815 +363 0.0978564 14566 +364 0.120879 15674 +365 0.137663 17049 +366 0.147092 18813 +367 0.150184 20578 +368 0.146971 22245 +369 0.136769 23723 +370 0.12367 24905 +371 0.106187 25871 +372 0.0860921 26687 +373 0.0645899 27375 +374 0.0453473 27635 +375 0.0298122 27551 +376 0.0185448 27134 +377 0.0110517 26468 +378 0.00640294 25661 +379 0.00367011 24653 +380 0.00211832 23556 +381 0.00125246 22513 +382 0.00076891 21568 +383 0.000557384 20672 +384 0.000557295 19811 +385 0.000556837 18982 +386 0.000557433 18179 +387 0.000557376 17457 +388 0.000557751 16720 +389 0.000556844 16112 +390 0.000555603 15479 +391 0.000554871 14809 +392 0.00060335 14275 +393 0.000982808 13757 +394 0.00170757 13221 +395 0.00310351 12758 +396 0.0058181 12286 +397 0.010991 11906 +398 0.0205342 11557 +399 0.0373486 11393 +400 0.0647659 11487 +401 0.105589 11887 +402 0.15967 12798 +403 0.220945 14260 +404 0.277122 16477 +405 0.310108 19295 +406 0.308854 22110 +407 0.274911 24915 +408 0.218618 27273 +409 0.156618 29189 +410 0.101775 30572 +411 0.0607503 31174 +412 0.0334708 31316 +413 0.0173443 30731 +414 0.00865633 29636 +415 0.00421141 28342 +416 0.00204387 26991 +417 0.00100602 25595 +418 0.000555131 24336 +419 0.000555037 23251 +420 0.000555559 22267 +421 0.000554916 21212 +422 0.000554432 20306 +423 
0.000554751 19488 +424 0.00055638 18727 +425 0.000556727 17927 +426 0.000556368 17198 +427 0.000788004 16578 +428 0.00154404 15944 +429 0.00302383 15315 +430 0.00582586 14786 +431 0.0108457 14290 +432 0.0192962 13815 +433 0.0323072 13561 +434 0.0505101 13456 +435 0.0732162 13811 +436 0.0978737 14403 +437 0.121405 15460 +438 0.138202 16993 +439 0.1482 18710 +440 0.149707 20578 +441 0.146945 22256 +442 0.137785 23713 +443 0.123767 25058 +444 0.105989 26087 +445 0.085483 26759 +446 0.0646144 27375 +447 0.0454389 27680 +448 0.0299337 27531 +449 0.018663 27041 +450 0.0111347 26416 +451 0.00644197 25614 +452 0.00369229 24666 +453 0.00211986 23647 +454 0.00124761 22650 +455 0.000769104 21642 +456 0.000558796 20693 +457 0.000559908 19746 +458 0.000559562 18952 +459 0.00056042 18100 +460 0.000559447 17401 +461 0.000557893 16756 +462 0.000557137 16148 +463 0.000557269 15504 +464 0.000557596 14974 +465 0.000606298 14408 +466 0.000987712 13909 +467 0.00171257 13402 +468 0.00311667 12891 +469 0.00584794 12433 +470 0.0110774 11980 +471 0.0207006 11713 +472 0.037673 11583 +473 0.0654988 11677 +474 0.106982 12072 +475 0.161926 12898 +476 0.224327 14548 +477 0.281709 16796 +478 0.314567 19512 +479 0.313419 22428 +480 0.278962 25186 +481 0.221864 27755 +482 0.158559 29556 +483 0.103532 30572 +484 0.0611592 31162 +485 0.0337539 31197 +486 0.0175096 30619 +487 0.00865906 29606 +488 0.00420125 28271 +489 0.00203207 26856 +490 0.00100238 25542 +491 0.000554405 24306 +492 0.00055373 23160 +493 0.0005552 22152 +494 0.000553776 21192 +495 0.000553636 20302 +496 0.000553165 19505 +497 0.000554014 18719 +498 0.00055519 17993 +499 0.000556582 17233 +500 0.000788165 16569 +501 0.00154132 15953 +502 0.00302099 15350 +503 0.00581186 14752 +504 0.0108291 14267 +505 0.0192368 13946 +506 0.0322191 13677 +507 0.0503789 13594 +508 0.0730706 13768 +509 0.0980646 14416 +510 0.121601 15634 +511 0.139046 17110 +512 0.147779 18876 +513 0.149612 20734 +514 0.145796 22414 +515 0.136936 23884 +516 0.123807 
25078 +517 0.106212 26066 +518 0.0855482 26779 +519 0.0643386 27340 +520 0.0452926 27530 +521 0.0298659 27573 +522 0.0185447 27169 +523 0.0110178 26489 +524 0.00635235 25588 +525 0.00362881 24549 +526 0.00209238 23528 +527 0.00123133 22541 +528 0.000755917 21498 +529 0.000546368 20607 +530 0.000547382 19712 +531 0.000547084 18975 +532 0.000546453 18178 +533 0.000546062 17452 +534 0.000546085 16749 +535 0.000546151 16135 +536 0.000545628 15567 +537 0.000545969 14968 +538 0.000594606 14392 +539 0.000968849 13854 +540 0.00168489 13360 +541 0.00306337 12899 +542 0.00573505 12407 +543 0.0108348 12017 +544 0.02025 11713 +545 0.0368201 11517 +546 0.0639795 11556 +547 0.104882 11941 +548 0.158923 12854 +549 0.219796 14396 +550 0.275801 16733 +551 0.307622 19367 +552 0.30785 22230 +553 0.272898 24873 +554 0.217351 27152 +555 0.156138 29108 +556 0.101477 30379 +557 0.0601091 30971 +558 0.0331551 31126 +559 0.017167 30418 +560 0.00853886 29430 +561 0.00415201 28190 +562 0.00201849 26849 +563 0.000991957 25528 +564 0.000546751 24180 +565 0.00054534 23090 +566 0.000544403 22096 +567 0.00054368 21140 +568 0.000543407 20213 +569 0.000544421 19405 +570 0.000545241 18625 +571 0.000546995 17868 +572 0.000547101 17102 +573 0.00077428 16423 +574 0.00151348 15783 +575 0.00296212 15220 +576 0.00569555 14602 +577 0.0106307 14154 +578 0.0188783 13743 +579 0.0316572 13538 +580 0.0495211 13467 +581 0.0718936 13665 +582 0.0961304 14240 +583 0.119127 15341 +584 0.136233 16912 +585 0.145327 18567 +586 0.146983 20301 +587 0.143022 21953 +588 0.134931 23439 +589 0.121892 24750 +590 0.103955 25688 +591 0.0833804 26253 +592 0.0625106 26918 +593 0.0440419 27279 +594 0.0290823 27159 +595 0.0180758 26786 +596 0.0107654 26049 +597 0.00622673 25202 +598 0.00356716 24168 +599 0.00205866 23122 +600 0.00121254 22076 +601 0.000745744 21100 +602 0.000537789 20207 +603 0.000537982 19340 +604 0.000537795 18527 +605 0.000537955 17768 +606 0.000539259 17117 +607 0.00053942 16425 +608 0.000540477 15701 +609 
0.000540424 15134 +610 0.000540084 14558 +611 0.00058571 14069 +612 0.00095364 13498 +613 0.00165505 13054 +614 0.00300205 12616 +615 0.00561724 12142 +616 0.0106079 11720 +617 0.0198178 11410 +618 0.0360368 11231 +619 0.0623418 11314 +620 0.101856 11688 +621 0.15376 12623 +622 0.213046 14078 +623 0.267285 16225 +624 0.299225 18856 +625 0.299517 21756 +626 0.26697 24652 +627 0.2119 27051 +628 0.151393 28925 +629 0.098869 30065 +630 0.0593653 30570 +631 0.0327177 30483 +632 0.0170081 29735 +633 0.0084493 28844 +634 0.00409333 27665 +635 0.00197466 26356 +636 0.000967996 25009 +637 0.000533137 23839 +638 0.000532992 22721 +639 0.000534258 21676 +640 0.000534251 20709 +641 0.000534556 19798 +642 0.000535287 19008 +643 0.000536214 18278 +644 0.000536647 17547 +645 0.000536556 16901 +646 0.000761043 16256 +647 0.00149108 15621 +648 0.00292808 15032 +649 0.0056527 14504 +650 0.0105421 14010 +651 0.0186823 13646 +652 0.0312164 13356 +653 0.0485643 13404 +654 0.0704061 13612 +655 0.0945219 14230 +656 0.117178 15374 +657 0.134568 16843 +658 0.144475 18492 +659 0.146915 20238 +660 0.14393 21958 +661 0.134621 23537 +662 0.121737 24773 +663 0.104744 25772 +664 0.0846226 26427 +665 0.0639754 27040 +666 0.0448457 27279 +667 0.029482 27106 +668 0.0183036 26853 +669 0.0108721 26178 +670 0.00627116 25425 +671 0.0035776 24326 +672 0.00206466 23279 +673 0.00122064 22191 +674 0.000751578 21231 +675 0.000542574 20323 +676 0.000540396 19496 +677 0.000538805 18651 +678 0.00053881 17920 +679 0.000537801 17217 +680 0.000537866 16520 +681 0.000538522 15876 +682 0.000538795 15229 +683 0.000539519 14656 +684 0.000587348 14121 +685 0.000955855 13626 +686 0.00165656 13086 +687 0.00301095 12666 +688 0.00564993 12250 +689 0.0106767 11869 +690 0.0199729 11524 +691 0.03641 11331 +692 0.0632378 11402 +693 0.103483 11788 +694 0.156399 12682 +695 0.215591 14337 +696 0.269462 16547 +697 0.303615 19239 +698 0.304506 22023 +699 0.273068 24769 +700 0.21682 27223 +701 0.154934 29029 +702 0.100495 30241 
+703 0.0597382 30801 +704 0.0329221 30881 +705 0.0170591 30288 +706 0.00845353 29329 +707 0.00408176 28108 +708 0.00198037 26715 +709 0.000977102 25340 +710 0.000541566 24039 +711 0.000542333 22965 +712 0.000542417 21858 +713 0.000541182 20952 +714 0.00054038 20049 +715 0.000539725 19192 +716 0.000539603 18409 +717 0.000539754 17700 +718 0.000539679 16960 +719 0.000763508 16287 +720 0.00149327 15637 +721 0.00292609 15057 +722 0.00563308 14524 +723 0.0104893 14003 +724 0.0185874 13625 +725 0.0310985 13319 +726 0.0487417 13278 +727 0.0707124 13502 +728 0.0947795 14147 +729 0.117155 15183 +730 0.133995 16622 diff -Nru cargo-0.17.0/vendor/flate2-0.2.17/tests/gunzip.rs cargo-0.19.0/vendor/flate2-0.2.17/tests/gunzip.rs --- cargo-0.17.0/vendor/flate2-0.2.17/tests/gunzip.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/flate2-0.2.17/tests/gunzip.rs 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,31 @@ +extern crate flate2; + +use std::fs::File; +use std::io::prelude::*; +use std::io; +use std::path::Path; +use flate2::read::GzDecoder; + +// test extraction of a gzipped file +#[test] +fn test_extract_success() { + let content = extract_file(Path::new("tests/good-file.gz")).unwrap(); + let mut expected = Vec::new(); + File::open("tests/good-file.txt").unwrap().read_to_end(&mut expected).unwrap(); + assert!(content == expected); +} + +// test extraction fails on a corrupt file +#[test] +fn test_extract_failure() { + let result = extract_file(Path::new("tests/corrupt-file.gz")); + assert_eq!(result.err().unwrap().kind(), io::ErrorKind::InvalidInput); +} + +// Tries to extract path into memory (assuming a .gz file). 
+fn extract_file(path_compressed: &Path) -> io::Result>{ + let mut v = Vec::new(); + let f = try!(File::open(path_compressed)); + try!(try!(GzDecoder::new(f)).read_to_end(&mut v)); + Ok(v) +} diff -Nru cargo-0.17.0/vendor/flate2-0.2.17/.travis.yml cargo-0.19.0/vendor/flate2-0.2.17/.travis.yml --- cargo-0.17.0/vendor/flate2-0.2.17/.travis.yml 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/flate2-0.2.17/.travis.yml 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,30 @@ +language: rust +rust: + - stable + - beta + - nightly +sudo: false +before_script: + - pip install 'travis-cargo<0.2' --user && export PATH=$HOME/.local/bin:$PATH +script: + - export CARGO_TARGET_DIR=`pwd`/target + - cargo build --verbose + - cargo test --verbose + - cargo test --verbose --features zlib + - cargo test --verbose --features zlib --no-default-features + - cargo clean && cargo build + - rustdoc --test README.md -L target/debug -L target/debug/deps + - cargo doc --no-deps + - cargo doc --no-deps --manifest-path=miniz-sys/Cargo.toml +after_success: + - travis-cargo --only nightly doc-upload +env: + global: + secure: "PHVT7IaeP5nQQVwGHKwqCYBDp0QyetSlER7se2j2Xgfx+lw3Bu6VWH6VF04B636Gb0tHPN/sUCXSgGRcvDuy6XFOev4LfynoYxNKgHJYg2E34EP2QLwsFfnvE4iujaG3GJk3o935Y7OYGv2OP1HeG4Mv6JhQK0GLnNDBZQ65kWI=" + +notifications: + email: + on_success: never +os: + - linux + - osx diff -Nru cargo-0.17.0/vendor/foreign-types-0.2.0/.cargo-checksum.json cargo-0.19.0/vendor/foreign-types-0.2.0/.cargo-checksum.json --- cargo-0.17.0/vendor/foreign-types-0.2.0/.cargo-checksum.json 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/foreign-types-0.2.0/.cargo-checksum.json 2017-08-16 09:07:20.000000000 +0000 @@ -0,0 +1 @@ 
+{"files":{".cargo-ok":"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",".gitignore":"4cc6445feac7e9a1f8f1e1c51cc3afd0cf7bb931e3c5a6f18c41258401652702",".travis.yml":"79581d46c88f18838ff601dc84ee3bd99a49c21151cc442c3c07ede1c3d23858","Cargo.toml":"c51887565ba77e242868b79f534bb16cc61ff4284ccd5f04274919be264cd2f8","LICENSE-APACHE":"c6596eb7be8581c18be736c846fb9173b69eccf6ef94c5135893ec56bd92ba08","LICENSE-MIT":"333ea3aaa3cadb819f4acd9f9153f9feee060a995ca8710f32bc5bd9a4b91734","README.md":"6ccabff53d3a27b9125d72612fc4529d76e6e96f9f5dc97d964542dd61ec9b1f","src/lib.rs":"36f4bbd68e5207beffc98a1772e02da0ef1fac79f6009d1b26029de2bf27529c"},"package":"3e4056b9bd47f8ac5ba12be771f77a0dae796d1bbaaf5fd0b9c2d38b69b8a29d"} \ No newline at end of file diff -Nru cargo-0.17.0/vendor/foreign-types-0.2.0/Cargo.toml cargo-0.19.0/vendor/foreign-types-0.2.0/Cargo.toml --- cargo-0.17.0/vendor/foreign-types-0.2.0/Cargo.toml 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/foreign-types-0.2.0/Cargo.toml 2017-08-16 09:07:20.000000000 +0000 @@ -0,0 +1,11 @@ +[package] +name = "foreign-types" +version = "0.2.0" +authors = ["Steven Fackler "] +license = "MIT/Apache-2.0" +description = "A framework for Rust wrappers over C APIs" +repository = "https://github.com/sfackler/foreign-types" +documentation = "https://docs.rs/foreign-types/0.2.0/foreign_types" +readme = "README.md" + +[dependencies] diff -Nru cargo-0.17.0/vendor/foreign-types-0.2.0/.gitignore cargo-0.19.0/vendor/foreign-types-0.2.0/.gitignore --- cargo-0.17.0/vendor/foreign-types-0.2.0/.gitignore 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/foreign-types-0.2.0/.gitignore 2017-08-16 09:07:20.000000000 +0000 @@ -0,0 +1,4 @@ +target +Cargo.lock +.idea +*.iml diff -Nru cargo-0.17.0/vendor/foreign-types-0.2.0/LICENSE-APACHE cargo-0.19.0/vendor/foreign-types-0.2.0/LICENSE-APACHE --- cargo-0.17.0/vendor/foreign-types-0.2.0/LICENSE-APACHE 1970-01-01 00:00:00.000000000 +0000 +++ 
cargo-0.19.0/vendor/foreign-types-0.2.0/LICENSE-APACHE 2017-08-16 09:07:20.000000000 +0000 @@ -0,0 +1,202 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. 
Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative 
Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ diff -Nru cargo-0.17.0/vendor/foreign-types-0.2.0/LICENSE-MIT cargo-0.19.0/vendor/foreign-types-0.2.0/LICENSE-MIT --- cargo-0.17.0/vendor/foreign-types-0.2.0/LICENSE-MIT 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/foreign-types-0.2.0/LICENSE-MIT 2017-08-16 09:07:20.000000000 +0000 @@ -0,0 +1,19 @@ +Copyright (c) 2017 The foreign-types Developers + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff -Nru cargo-0.17.0/vendor/foreign-types-0.2.0/README.md cargo-0.19.0/vendor/foreign-types-0.2.0/README.md --- cargo-0.17.0/vendor/foreign-types-0.2.0/README.md 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/foreign-types-0.2.0/README.md 2017-08-16 09:07:20.000000000 +0000 @@ -0,0 +1,23 @@ +# foreign-types + +[![Build Status](https://travis-ci.org/sfackler/foreign-types.svg?branch=master)](https://travis-ci.org/sfackler/foreign-types) + +[Documentation](https://docs.rs/foreign-types) + +A framework for Rust wrappers over C APIs. 
+ +## License + +Licensed under either of + + * Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0) + * MIT license ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT) + +at your option. + +### Contribution + +Unless you explicitly state otherwise, any contribution intentionally +submitted for inclusion in the work by you, as defined in the Apache-2.0 +license, shall be dual licensed as above, without any additional terms or +conditions. diff -Nru cargo-0.17.0/vendor/foreign-types-0.2.0/src/lib.rs cargo-0.19.0/vendor/foreign-types-0.2.0/src/lib.rs --- cargo-0.17.0/vendor/foreign-types-0.2.0/src/lib.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/foreign-types-0.2.0/src/lib.rs 2017-08-16 09:07:20.000000000 +0000 @@ -0,0 +1,296 @@ +//! A framework for Rust wrappers over C APIs. +//! +//! Ownership is as important in C as it is in Rust, but the semantics are often implicit. In +//! particular, pointer-to-value is commonly used to pass C values both when transferring ownership +//! or a borrow. +//! +//! This crate provides a framework to define a Rust wrapper over these kinds of raw C APIs in a way +//! that allows ownership semantics to be expressed in an ergonomic manner. The framework takes a +//! dual-type approach similar to APIs in the standard library such as `PathBuf`/`Path` or `String`/ +//! `&str`. One type represents an owned value and references to the other represent borrowed +//! values. +//! +//! # Examples +//! +//! ``` +//! use foreign_types::{ForeignType, ForeignTypeRef, Opaque}; +//! use std::ops::{Deref, DerefMut}; +//! +//! mod foo_sys { +//! pub enum FOO {} +//! +//! extern { +//! pub fn FOO_free(foo: *mut FOO); +//! } +//! } +//! +//! // The borrowed type is a newtype wrapper around an `Opaque` value. +//! // +//! // `FooRef` values never exist; we instead create references to `FooRef`s from raw C pointers. +//! pub struct FooRef(Opaque); +//! +//! 
impl ForeignTypeRef for FooRef { +//! type CType = foo_sys::FOO; +//! } +//! +//! // The owned type is simply a newtype wrapper around the raw C type. +//! // +//! // It dereferences to `FooRef`, so methods that do not require ownership should be defined +//! // there. +//! pub struct Foo(*mut foo_sys::FOO); +//! +//! impl Drop for Foo { +//! fn drop(&mut self) { +//! unsafe { foo_sys::FOO_free(self.0) } +//! } +//! } +//! +//! impl ForeignType for Foo { +//! type CType = foo_sys::FOO; +//! type Ref = FooRef; +//! +//! unsafe fn from_ptr(ptr: *mut foo_sys::FOO) -> Foo { +//! Foo(ptr) +//! } +//! +//! fn as_ptr(&self) -> *mut foo_sys::FOO { +//! self.0 +//! } +//! } +//! +//! impl Deref for Foo { +//! type Target = FooRef; +//! +//! fn deref(&self) -> &FooRef { +//! unsafe { FooRef::from_ptr(self.0) } +//! } +//! } +//! +//! impl DerefMut for Foo { +//! fn deref_mut(&mut self) -> &mut FooRef { +//! unsafe { FooRef::from_ptr_mut(self.0) } +//! } +//! } +//! ``` +//! +//! The `foreign_type!` macro can generate this boilerplate for you: +//! +//! ``` +//! #[macro_use] +//! extern crate foreign_types; +//! +//! mod foo_sys { +//! pub enum FOO {} +//! +//! extern { +//! pub fn FOO_free(foo: *mut FOO); +//! } +//! } +//! +//! foreign_type! { +//! type CType = foo_sys::FOO; +//! fn drop = foo_sys::FOO_free; +//! /// A Foo. +//! pub struct Foo; +//! /// A borrowed Foo. +//! pub struct FooRef; +//! } +//! +//! # fn main() {} +//! ``` +//! +//! Say we then have a separate type in our C API that contains a `FOO`: +//! +//! ``` +//! mod foo_sys { +//! pub enum FOO {} +//! pub enum BAR {} +//! +//! extern { +//! pub fn FOO_free(foo: *mut FOO); +//! pub fn BAR_free(bar: *mut BAR); +//! pub fn BAR_get_foo(bar: *mut BAR) -> *mut FOO; +//! } +//! } +//! ``` +//! +//! The documentation for the C library states that `BAR_get_foo` returns a reference into the `BAR` +//! passed to it, which translates into a reference in Rust. It also says that we're allowed to +//! 
modify the `FOO`, so we'll define a pair of accessor methods, one immutable and one mutable: +//! +//! ``` +//! #[macro_use] +//! extern crate foreign_types; +//! +//! use foreign_types::ForeignTypeRef; +//! +//! mod foo_sys { +//! pub enum FOO {} +//! pub enum BAR {} +//! +//! extern { +//! pub fn FOO_free(foo: *mut FOO); +//! pub fn BAR_free(bar: *mut BAR); +//! pub fn BAR_get_foo(bar: *mut BAR) -> *mut FOO; +//! } +//! } +//! +//! foreign_type! { +//! type CType = foo_sys::FOO; +//! fn drop = foo_sys::FOO_free; +//! /// A Foo. +//! pub struct Foo; +//! /// A borrowed Foo. +//! pub struct FooRef; +//! } +//! +//! foreign_type! { +//! type CType = foo_sys::BAR; +//! fn drop = foo_sys::BAR_free; +//! /// A Foo. +//! pub struct Bar; +//! /// A borrowed Bar. +//! pub struct BarRef; +//! } +//! +//! impl BarRef { +//! fn foo(&self) -> &FooRef { +//! unsafe { FooRef::from_ptr(foo_sys::BAR_get_foo(self.as_ptr())) } +//! } +//! +//! fn foo_mut(&mut self) -> &mut FooRef { +//! unsafe { FooRef::from_ptr_mut(foo_sys::BAR_get_foo(self.as_ptr())) } +//! } +//! } +//! +//! # fn main() {} +//! ``` +#![no_std] +#![warn(missing_docs)] +#![doc(html_root_url="https://docs.rs/foreign-types/0.2.0")] + +use core::cell::UnsafeCell; + +/// An opaque type used to define `ForeignTypeRef` types. +/// +/// A type implementing `ForeignTypeRef` should simply be a newtype wrapper around this type. +pub struct Opaque(UnsafeCell<()>); + +/// A type implemented by wrappers over foreign types. +pub trait ForeignType: Sized { + /// The raw C type. + type CType; + + /// The type representing a reference to this type. + type Ref: ForeignTypeRef; + + /// Constructs an instance of this type from its raw type. + unsafe fn from_ptr(ptr: *mut Self::CType) -> Self; + + /// Returns a raw pointer to the wrapped value. + fn as_ptr(&self) -> *mut Self::CType; +} + +/// A trait implemented by types which reference borrowed foreign types. +pub trait ForeignTypeRef: Sized { + /// The raw C type. 
+ type CType; + + /// Constructs a shared instance of this type from its raw type. + #[inline] + unsafe fn from_ptr<'a>(ptr: *mut Self::CType) -> &'a Self { + &*(ptr as *mut _) + } + + /// Constructs a mutable reference of this type from its raw type. + #[inline] + unsafe fn from_ptr_mut<'a>(ptr: *mut Self::CType) -> &'a mut Self { + &mut *(ptr as *mut _) + } + + /// Returns a raw pointer to the wrapped value. + #[inline] + fn as_ptr(&self) -> *mut Self::CType { + self as *const _ as *mut _ + } +} + +/// A macro to easily define wrappers for foreign types. +/// +/// # Examples +/// +/// ``` +/// #[macro_use] +/// extern crate foreign_types; +/// +/// # mod openssl_sys { pub type SSL = (); pub unsafe fn SSL_free(_: *mut SSL) {} } +/// foreign_type! { +/// type CType = openssl_sys::SSL; +/// fn drop = openssl_sys::SSL_free; +/// /// Documentation for the owned type. +/// pub struct Ssl; +/// /// Documentation for the borrowed type. +/// pub struct SslRef; +/// } +/// +/// # fn main() {} +/// ``` +#[macro_export] +macro_rules! 
foreign_type { + ( + type CType = $ctype:ty; + fn drop = $drop:expr; + $(#[$owned_attr:meta])* + pub struct $owned:ident; + $(#[$borrowed_attr:meta])* + pub struct $borrowed:ident; + ) => { + $(#[$owned_attr])* + pub struct $owned(*mut $ctype); + + impl $crate::ForeignType for $owned { + type CType = $ctype; + type Ref = $borrowed; + + #[inline] + unsafe fn from_ptr(ptr: *mut $ctype) -> $owned { + $owned(ptr) + } + + #[inline] + fn as_ptr(&self) -> *mut $ctype { + self.0 + } + } + + impl Drop for $owned { + #[inline] + fn drop(&mut self) { + unsafe { $drop(self.0) } + } + } + + impl ::std::ops::Deref for $owned { + type Target = $borrowed; + + #[inline] + fn deref(&self) -> &$borrowed { + unsafe { $crate::ForeignTypeRef::from_ptr(self.0) } + } + } + + impl ::std::ops::DerefMut for $owned { + #[inline] + fn deref_mut(&mut self) -> &mut $borrowed { + unsafe { $crate::ForeignTypeRef::from_ptr_mut(self.0) } + } + } + + $(#[$borrowed_attr])* + pub struct $borrowed($crate::Opaque); + + impl $crate::ForeignTypeRef for $borrowed { + type CType = $ctype; + } + } +} diff -Nru cargo-0.17.0/vendor/foreign-types-0.2.0/.travis.yml cargo-0.19.0/vendor/foreign-types-0.2.0/.travis.yml --- cargo-0.17.0/vendor/foreign-types-0.2.0/.travis.yml 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/foreign-types-0.2.0/.travis.yml 2017-08-16 09:07:20.000000000 +0000 @@ -0,0 +1,7 @@ +language: rust +cache: cargo +rust: +- nightly +- 1.13.0 +script: +- cargo test diff -Nru cargo-0.17.0/vendor/fs2-0.3.0/.appveyor.yml cargo-0.19.0/vendor/fs2-0.3.0/.appveyor.yml --- cargo-0.17.0/vendor/fs2-0.3.0/.appveyor.yml 2017-03-24 16:59:54.000000000 +0000 +++ cargo-0.19.0/vendor/fs2-0.3.0/.appveyor.yml 1970-01-01 00:00:00.000000000 +0000 @@ -1,18 +0,0 @@ -environment: - matrix: - - TARGET: x86_64-pc-windows-msvc - - TARGET: i686-pc-windows-msvc - - TARGET: x86_64-pc-windows-gnu - - TARGET: i686-pc-windows-gnu - -install: - - ps: Start-FileDownload 
"https://static.rust-lang.org/dist/rust-nightly-${env:TARGET}.exe" -FileName "rust-nightly.exe" - - ps: .\rust-nightly.exe /VERYSILENT /NORESTART /DIR="C:\rust" | Out-Null - - ps: $env:PATH="$env:PATH;C:\rust\bin" - -build_script: - - cargo build -v - -test_script: - - SET RUST_BACKTRACE=1 - - cargo test -v diff -Nru cargo-0.17.0/vendor/fs2-0.3.0/.cargo-checksum.json cargo-0.19.0/vendor/fs2-0.3.0/.cargo-checksum.json --- cargo-0.17.0/vendor/fs2-0.3.0/.cargo-checksum.json 2017-03-24 16:59:54.000000000 +0000 +++ cargo-0.19.0/vendor/fs2-0.3.0/.cargo-checksum.json 1970-01-01 00:00:00.000000000 +0000 @@ -1 +0,0 @@ -{"files":{".appveyor.yml":"15c5548159ad6ebcc02960bb6a3269e729e772df2733b7d4c7cc1583c413ae45",".cargo-ok":"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",".gitignore":"f9b1ca6ae27d1c18215265024629a8960c31379f206d9ed20f64e0b2dcf79805",".travis.yml":"75cedbf0e785b1d17808d087e1cf9ef2d2fff218ef5a91855a1c3a81524ddb76","Cargo.toml":"d29b2cb48b6a8737d804edeee03597b65800b6c94af2f5c9d5ac8678ba45ef5d","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"7b63ecd5f1902af1b63729947373683c32745c16a10e8e6292e2e2dcd7e90ae0","README.md":"1fea7c4711473a0b6a2f0f452c7c2e14776507ac01a7af22275440de6d22b54e","src/lib.rs":"807a596f05977cb6cec922a9053baffadabde603481956663bdb58d89817f859","src/unix.rs":"a62328ca72d9b782d900c287f3b9886e308f24c15b8acecac26ea1bbd783ca0e","src/windows.rs":"40cef48fc30365e8848fbd541e5a2dede08de4b8cfc43f065537466c67c846bf"},"package":"640001e1bd865c7c32806292822445af576a6866175b5225aa2087ca5e3de551"} \ No newline at end of file diff -Nru cargo-0.17.0/vendor/fs2-0.3.0/Cargo.toml cargo-0.19.0/vendor/fs2-0.3.0/Cargo.toml --- cargo-0.17.0/vendor/fs2-0.3.0/Cargo.toml 2017-03-24 16:59:54.000000000 +0000 +++ cargo-0.19.0/vendor/fs2-0.3.0/Cargo.toml 1970-01-01 00:00:00.000000000 +0000 @@ -1,19 +0,0 @@ -[package] -name = "fs2" -version = "0.3.0" -authors = ["Dan Burkert "] -license = "MIT/Apache-2.0" 
-repository = "https://github.com/danburkert/fs2-rs" -documentation = "https://docs.rs/fs2" -description = "Cross-platform file locks and file duplication." -keywords = ["file", "file-system", "lock", "duplicate", "flock"] - -[target.'cfg(unix)'.dependencies] -libc = "0.2.2" - -[target.'cfg(windows)'.dependencies] -winapi = "0.2" -kernel32-sys = "0.2" - -[dev-dependencies] -tempdir = "0.3" diff -Nru cargo-0.17.0/vendor/fs2-0.3.0/.gitignore cargo-0.19.0/vendor/fs2-0.3.0/.gitignore --- cargo-0.17.0/vendor/fs2-0.3.0/.gitignore 2017-03-24 16:59:54.000000000 +0000 +++ cargo-0.19.0/vendor/fs2-0.3.0/.gitignore 1970-01-01 00:00:00.000000000 +0000 @@ -1,2 +0,0 @@ -target -Cargo.lock diff -Nru cargo-0.17.0/vendor/fs2-0.3.0/LICENSE-APACHE cargo-0.19.0/vendor/fs2-0.3.0/LICENSE-APACHE --- cargo-0.17.0/vendor/fs2-0.3.0/LICENSE-APACHE 2017-03-24 16:59:54.000000000 +0000 +++ cargo-0.19.0/vendor/fs2-0.3.0/LICENSE-APACHE 1970-01-01 00:00:00.000000000 +0000 @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - -1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. 
- - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - -2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - -3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - -4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. 
You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - -5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - -6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - -7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - -8. 
Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - -9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - -END OF TERMS AND CONDITIONS - -APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- -Copyright [yyyy] [name of copyright owner] - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. diff -Nru cargo-0.17.0/vendor/fs2-0.3.0/LICENSE-MIT cargo-0.19.0/vendor/fs2-0.3.0/LICENSE-MIT --- cargo-0.17.0/vendor/fs2-0.3.0/LICENSE-MIT 2017-03-24 16:59:54.000000000 +0000 +++ cargo-0.19.0/vendor/fs2-0.3.0/LICENSE-MIT 1970-01-01 00:00:00.000000000 +0000 @@ -1,25 +0,0 @@ -Copyright (c) 2015 The Rust Project Developers - -Permission is hereby granted, free of charge, to any -person obtaining a copy of this software and associated -documentation files (the "Software"), to deal in the -Software without restriction, including without -limitation the rights to use, copy, modify, merge, -publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software -is furnished to do so, subject to the following -conditions: - -The above copyright notice and this permission notice -shall be included in all copies or substantial portions -of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -DEALINGS IN THE SOFTWARE. 
diff -Nru cargo-0.17.0/vendor/fs2-0.3.0/README.md cargo-0.19.0/vendor/fs2-0.3.0/README.md --- cargo-0.17.0/vendor/fs2-0.3.0/README.md 2017-03-24 16:59:54.000000000 +0000 +++ cargo-0.19.0/vendor/fs2-0.3.0/README.md 1970-01-01 00:00:00.000000000 +0000 @@ -1,50 +0,0 @@ -# fs2 - -Extended utilities for working with files and filesystems in Rust. `fs2` -requires Rust stable 1.8 or greater. - -[Documentation](https://danburkert.github.io/fs2-rs/fs2/index.html) - -[![Linux Status](https://travis-ci.org/danburkert/fs2-rs.svg?branch=master)](https://travis-ci.org/danburkert/fs2-rs) -[![Windows Status](https://ci.appveyor.com/api/projects/status/iuvjv1aaaml0rntt/branch/master?svg=true)](https://ci.appveyor.com/project/danburkert/fs2-rs) - -## Features - -- [x] file descriptor duplication. -- [x] file locks. -- [x] file (pre)allocation. -- [x] file allocation information. -- [x] filesystem space usage information. - -## Platforms - -`fs2` should work on any platform supported by -[`libc`](https://github.com/rust-lang-nursery/libc#platforms-and-documentation). - -`fs2` is continuously tested on: - * `x86_64-unknown-linux-gnu` (Linux) - * `i686-unknown-linux-gnu` - * `x86_64-apple-darwin` (OSX) - * `i686-apple-darwin` - * `x86_64-pc-windows-msvc` (Windows) - * `i686-pc-windows-msvc` - * `x86_64-pc-windows-gnu` - * `i686-pc-windows-gnu` - -## Benchmarks - -Simple benchmarks are provided for the methods provided. Many of these -benchmarks use files in a temporary directory. On many modern Linux distros the -default temporary directory, `/tmp`, is mounted on a tempfs filesystem, which -will have different performance characteristics than a disk-backed filesystem. -The temporary directory is configurable at runtime through the environment (see -[`env::temp_dir`](https://doc.rust-lang.org/stable/std/env/fn.temp_dir.html)). - -## License - -`fs2` is primarily distributed under the terms of both the MIT license and the -Apache License (Version 2.0). 
- -See [LICENSE-APACHE](LICENSE-APACHE), [LICENSE-MIT](LICENSE-MIT) for details. - -Copyright (c) 2015 Dan Burkert. diff -Nru cargo-0.17.0/vendor/fs2-0.3.0/src/lib.rs cargo-0.19.0/vendor/fs2-0.3.0/src/lib.rs --- cargo-0.17.0/vendor/fs2-0.3.0/src/lib.rs 2017-03-24 16:59:54.000000000 +0000 +++ cargo-0.19.0/vendor/fs2-0.3.0/src/lib.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,407 +0,0 @@ -#![cfg_attr(test, feature(test))] -#![deny(warnings)] - -#[cfg(unix)] -mod unix; -#[cfg(unix)] -use unix as sys; - -#[cfg(windows)] -mod windows; -#[cfg(windows)] -use windows as sys; - -use std::fs::File; -use std::io::{Error, Result}; -use std::path::Path; - -/// Extension trait for `std::fs::File` which provides allocation, duplication and locking methods. -/// -/// ## Notes on File Locks -/// -/// This library provides whole-file locks in both shared (read) and exclusive -/// (read-write) varieties. -/// -/// File locks are a cross-platform hazard since the file lock APIs exposed by -/// operating system kernels vary in subtle and not-so-subtle ways. -/// -/// The API exposed by this library can be safely used across platforms as long -/// as the following rules are followed: -/// -/// * Multiple locks should not be created on an individual `File` instance -/// concurrently. -/// * Duplicated files should not be locked without great care. -/// * Files to be locked should be opened with at least read or write -/// permissions. -/// * File locks may only be relied upon to be advisory. -/// -/// See the tests in `lib.rs` for cross-platform lock behavior that may be -/// relied upon; see the tests in `unix.rs` and `windows.rs` for examples of -/// platform-specific behavior. File locks are implemented with -/// [`flock(2)`](http://man7.org/linux/man-pages/man2/flock.2.html) on Unix and -/// [`LockFile`](https://msdn.microsoft.com/en-us/library/windows/desktop/aa365202(v=vs.85).aspx) -/// on Windows. -pub trait FileExt { - - /// Returns a duplicate instance of the file. 
- /// - /// The returned file will share the same file position as the original - /// file. - /// - /// # Notes - /// - /// This is implemented with - /// [`dup(2)`](http://man7.org/linux/man-pages/man2/dup.2.html) on Unix and - /// [`DuplicateHandle`](https://msdn.microsoft.com/en-us/library/windows/desktop/ms724251(v=vs.85).aspx) - /// on Windows. - fn duplicate(&self) -> Result; - - /// Returns the amount of physical space allocated for a file. - fn allocated_size(&self) -> Result; - - /// Ensures that at least `len` bytes of disk space are allocated for the - /// file, and the file size is at least `len` bytes. After a successful call - /// to `allocate`, subsequent writes to the file within the specified length - /// are guaranteed not to fail because of lack of disk space. - fn allocate(&self, len: u64) -> Result<()>; - - /// Locks the file for shared usage, blocking if the file is currently - /// locked exclusively. - fn lock_shared(&self) -> Result<()>; - - /// Locks the file for exclusive usage, blocking if the file is currently - /// locked. - fn lock_exclusive(&self) -> Result<()>; - - /// Locks the file for shared usage, or returns a an error if the file is - /// currently locked (see `lock_contended_error`). - fn try_lock_shared(&self) -> Result<()>; - - /// Locks the file for shared usage, or returns a an error if the file is - /// currently locked (see `lock_contended_error`). - fn try_lock_exclusive(&self) -> Result<()>; - - /// Unlocks the file. 
- fn unlock(&self) -> Result<()>; -} - -impl FileExt for File { - fn duplicate(&self) -> Result { - sys::duplicate(self) - } - fn allocated_size(&self) -> Result { - sys::allocated_size(self) - } - fn allocate(&self, len: u64) -> Result<()> { - sys::allocate(self, len) - } - fn lock_shared(&self) -> Result<()> { - sys::lock_shared(self) - } - fn lock_exclusive(&self) -> Result<()> { - sys::lock_exclusive(self) - } - fn try_lock_shared(&self) -> Result<()> { - sys::try_lock_shared(self) - } - fn try_lock_exclusive(&self) -> Result<()> { - sys::try_lock_exclusive(self) - } - fn unlock(&self) -> Result<()> { - sys::unlock(self) - } -} - -/// Returns the error that a call to a try lock method on a contended file will -/// return. -pub fn lock_contended_error() -> Error { - sys::lock_error() -} - -/// Returns the number of free bytes in the file system containing the provided -/// path. -pub fn free_space

(path: P) -> Result where P: AsRef { - sys::free_space(path) -} - -/// Returns the available space in bytes to non-priveleged users in the file -/// system containing the provided path. -pub fn available_space

(path: P) -> Result where P: AsRef { - sys::available_space(path) -} - -/// Returns the total space in bytes in the file system containing the provided -/// path. -pub fn total_space

(path: P) -> Result where P: AsRef { - sys::total_space(path) -} - -/// Returns the filesystem's disk space allocation granularity in bytes. -/// The provided path may be for any file in the filesystem. -/// -/// On Posix, this is equivalent to the filesystem's block size. -/// On Windows, this is equivalent to the filesystem's cluster size. -pub fn allocation_granularity

(path: P) -> Result where P: AsRef { - sys::allocation_granularity(path) -} - -#[cfg(test)] -mod test { - - extern crate tempdir; - extern crate test; - - use std::fs; - use super::*; - use std::io::{Read, Seek, SeekFrom, Write}; - - /// Tests file duplication. - #[test] - fn duplicate() { - let tempdir = tempdir::TempDir::new("fs2").unwrap(); - let path = tempdir.path().join("fs2"); - let mut file1 = - fs::OpenOptions::new().read(true).write(true).create(true).open(&path).unwrap(); - let mut file2 = file1.duplicate().unwrap(); - - // Write into the first file and then drop it. - file1.write_all(b"foo").unwrap(); - drop(file1); - - let mut buf = vec![]; - - // Read from the second file; since the position is shared it will already be at EOF. - file2.read_to_end(&mut buf).unwrap(); - assert_eq!(0, buf.len()); - - // Rewind and read. - file2.seek(SeekFrom::Start(0)).unwrap(); - file2.read_to_end(&mut buf).unwrap(); - assert_eq!(&buf, &b"foo"); - } - - /// Tests shared file lock operations. - #[test] - fn lock_shared() { - let tempdir = tempdir::TempDir::new("fs2").unwrap(); - let path = tempdir.path().join("fs2"); - let file1 = fs::OpenOptions::new().read(true).write(true).create(true).open(&path).unwrap(); - let file2 = fs::OpenOptions::new().read(true).write(true).create(true).open(&path).unwrap(); - let file3 = fs::OpenOptions::new().read(true).write(true).create(true).open(&path).unwrap(); - - // Concurrent shared access is OK, but not shared and exclusive. - file1.lock_shared().unwrap(); - file2.lock_shared().unwrap(); - assert_eq!(file3.try_lock_exclusive().unwrap_err().kind(), - lock_contended_error().kind()); - file1.unlock().unwrap(); - assert_eq!(file3.try_lock_exclusive().unwrap_err().kind(), - lock_contended_error().kind()); - - // Once all shared file locks are dropped, an exclusive lock may be created; - file2.unlock().unwrap(); - file3.lock_exclusive().unwrap(); - } - - /// Tests exclusive file lock operations. 
- #[test] - fn lock_exclusive() { - let tempdir = tempdir::TempDir::new("fs2").unwrap(); - let path = tempdir.path().join("fs2"); - let file1 = fs::OpenOptions::new().read(true).write(true).create(true).open(&path).unwrap(); - let file2 = fs::OpenOptions::new().read(true).write(true).create(true).open(&path).unwrap(); - - // No other access is possible once an exclusive lock is created. - file1.lock_exclusive().unwrap(); - assert_eq!(file2.try_lock_exclusive().unwrap_err().kind(), - lock_contended_error().kind()); - assert_eq!(file2.try_lock_shared().unwrap_err().kind(), - lock_contended_error().kind()); - - // Once the exclusive lock is dropped, the second file is able to create a lock. - file1.unlock().unwrap(); - file2.lock_exclusive().unwrap(); - } - - /// Tests that a lock is released after the file that owns it is dropped. - #[test] - fn lock_cleanup() { - let tempdir = tempdir::TempDir::new("fs2").unwrap(); - let path = tempdir.path().join("fs2"); - let file1 = fs::OpenOptions::new().read(true).write(true).create(true).open(&path).unwrap(); - let file2 = fs::OpenOptions::new().read(true).write(true).create(true).open(&path).unwrap(); - - file1.lock_exclusive().unwrap(); - assert_eq!(file2.try_lock_shared().unwrap_err().kind(), - lock_contended_error().kind()); - - // Drop file1; the lock should be released. - drop(file1); - file2.lock_shared().unwrap(); - } - - /// Tests file allocation. - #[test] - fn allocate() { - let tempdir = tempdir::TempDir::new("fs2").unwrap(); - let path = tempdir.path().join("fs2"); - let file = fs::OpenOptions::new().write(true).create(true).open(&path).unwrap(); - let blksize = allocation_granularity(&path).unwrap(); - - // New files are created with no allocated size. - assert_eq!(0, file.allocated_size().unwrap()); - assert_eq!(0, file.metadata().unwrap().len()); - - // Allocate space for the file, checking that the allocated size steps - // up by block size, and the file length matches the allocated size. 
- - file.allocate(2 * blksize - 1).unwrap(); - assert_eq!(2 * blksize, file.allocated_size().unwrap()); - assert_eq!(2 * blksize - 1, file.metadata().unwrap().len()); - - // Truncate the file, checking that the allocated size steps down by - // block size. - - file.set_len(blksize + 1).unwrap(); - assert_eq!(2 * blksize, file.allocated_size().unwrap()); - assert_eq!(blksize + 1, file.metadata().unwrap().len()); - } - - /// Checks filesystem space methods. - #[test] - fn filesystem_space() { - let tempdir = tempdir::TempDir::new("fs2").unwrap(); - let total_space = total_space(&tempdir.path()).unwrap(); - let free_space = free_space(&tempdir.path()).unwrap(); - let available_space = available_space(&tempdir.path()).unwrap(); - - assert!(total_space > free_space); - assert!(total_space > available_space); - assert!(available_space <= free_space); - } - - /// Benchmarks creating and removing a file. This is a baseline benchmark - /// for comparing against the truncate and allocate benchmarks. - #[bench] - fn bench_file_create(b: &mut test::Bencher) { - let tempdir = tempdir::TempDir::new("fs2").unwrap(); - let path = tempdir.path().join("file"); - - b.iter(|| { - fs::OpenOptions::new() - .read(true) - .write(true) - .create(true) - .open(&path) - .unwrap(); - fs::remove_file(&path).unwrap(); - }); - } - - /// Benchmarks creating a file, truncating it to 32MiB, and deleting it. - #[bench] - fn bench_file_truncate(b: &mut test::Bencher) { - let size = 32 * 1024 * 1024; - let tempdir = tempdir::TempDir::new("fs2").unwrap(); - let path = tempdir.path().join("file"); - - b.iter(|| { - let file = fs::OpenOptions::new() - .read(true) - .write(true) - .create(true) - .open(&path) - .unwrap(); - file.set_len(size).unwrap(); - fs::remove_file(&path).unwrap(); - }); - } - - /// Benchmarks creating a file, allocating 32MiB for it, and deleting it. 
- #[bench] - fn bench_file_allocate(b: &mut test::Bencher) { - let size = 32 * 1024 * 1024; - let tempdir = tempdir::TempDir::new("fs2").unwrap(); - let path = tempdir.path().join("file"); - - b.iter(|| { - let file = fs::OpenOptions::new() - .read(true) - .write(true) - .create(true) - .open(&path) - .unwrap(); - file.allocate(size).unwrap(); - fs::remove_file(&path).unwrap(); - }); - } - - /// Benchmarks creating a file, allocating 32MiB for it, and deleting it. - #[bench] - fn bench_allocated_size(b: &mut test::Bencher) { - let size = 32 * 1024 * 1024; - let tempdir = tempdir::TempDir::new("fs2").unwrap(); - let path = tempdir.path().join("file"); - let file = fs::OpenOptions::new() - .read(true) - .write(true) - .create(true) - .open(&path) - .unwrap(); - file.allocate(size).unwrap(); - - b.iter(|| { - file.allocated_size().unwrap(); - }); - } - - /// Benchmarks duplicating a file descriptor or handle. - #[bench] - fn bench_duplicate(b: &mut test::Bencher) { - let tempdir = tempdir::TempDir::new("fs2").unwrap(); - let path = tempdir.path().join("fs2"); - let file = fs::OpenOptions::new().read(true).write(true).create(true).open(&path).unwrap(); - - b.iter(|| test::black_box(file.duplicate().unwrap())); - } - - /// Benchmarks locking and unlocking a file lock. - #[bench] - fn bench_lock_unlock(b: &mut test::Bencher) { - let tempdir = tempdir::TempDir::new("fs2").unwrap(); - let path = tempdir.path().join("fs2"); - let file = fs::OpenOptions::new().read(true).write(true).create(true).open(&path).unwrap(); - - b.iter(|| { - file.lock_exclusive().unwrap(); - file.unlock().unwrap(); - }); - } - - /// Benchmarks the free space method. - #[bench] - fn bench_free_space(b: &mut test::Bencher) { - let tempdir = tempdir::TempDir::new("fs2").unwrap(); - b.iter(|| { - test::black_box(free_space(&tempdir.path()).unwrap()); - }); - } - - /// Benchmarks the available space method. 
- #[bench] - fn bench_available_space(b: &mut test::Bencher) { - let tempdir = tempdir::TempDir::new("fs2").unwrap(); - b.iter(|| { - test::black_box(available_space(&tempdir.path()).unwrap()); - }); - } - - /// Benchmarks the total space method. - #[bench] - fn bench_total_space(b: &mut test::Bencher) { - let tempdir = tempdir::TempDir::new("fs2").unwrap(); - b.iter(|| { - test::black_box(total_space(&tempdir.path()).unwrap()); - }); - } -} diff -Nru cargo-0.17.0/vendor/fs2-0.3.0/src/unix.rs cargo-0.19.0/vendor/fs2-0.3.0/src/unix.rs --- cargo-0.17.0/vendor/fs2-0.3.0/src/unix.rs 2017-03-24 16:59:54.000000000 +0000 +++ cargo-0.19.0/vendor/fs2-0.3.0/src/unix.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,219 +0,0 @@ -extern crate libc; - -use std::ffi::CString; -use std::fs::File; -use std::io::{Error, ErrorKind, Result}; -use std::mem; -use std::os::unix::ffi::OsStrExt; -use std::os::unix::fs::MetadataExt; -use std::os::unix::io::{AsRawFd, FromRawFd}; -use std::path::Path; - -pub fn duplicate(file: &File) -> Result { - unsafe { - let fd = libc::dup(file.as_raw_fd()); - - if fd < 0 { - Err(Error::last_os_error()) - } else { - Ok(File::from_raw_fd(fd)) - } - } -} - -pub fn lock_shared(file: &File) -> Result<()> { - flock(file, libc::LOCK_SH) -} - -pub fn lock_exclusive(file: &File) -> Result<()> { - flock(file, libc::LOCK_EX) -} - -pub fn try_lock_shared(file: &File) -> Result<()> { - flock(file, libc::LOCK_SH | libc::LOCK_NB) -} - -pub fn try_lock_exclusive(file: &File) -> Result<()> { - flock(file, libc::LOCK_EX | libc::LOCK_NB) -} - -pub fn unlock(file: &File) -> Result<()> { - flock(file, libc::LOCK_UN) -} - -pub fn lock_error() -> Error { - Error::from_raw_os_error(libc::EWOULDBLOCK) -} - -fn flock(file: &File, flag: libc::c_int) -> Result<()> { - let ret = unsafe { libc::flock(file.as_raw_fd(), flag) }; - if ret < 0 { Err(Error::last_os_error()) } else { Ok(()) } -} - -pub fn allocated_size(file: &File) -> Result { - file.metadata().map(|m| m.blocks() as u64 * 
512) -} - -#[cfg(any(target_os = "linux", - target_os = "freebsd", - target_os = "android", - target_os = "nacl"))] -pub fn allocate(file: &File, len: u64) -> Result<()> { - let ret = unsafe { libc::posix_fallocate(file.as_raw_fd(), 0, len as libc::off_t) }; - if ret == 0 { Ok(()) } else { Err(Error::last_os_error()) } -} - -#[cfg(any(target_os = "macos", target_os = "ios"))] -pub fn allocate(file: &File, len: u64) -> Result<()> { - let stat = try!(file.metadata()); - - if len > stat.blocks() as u64 * 512 { - let mut fstore = libc::fstore_t { - fst_flags: libc::F_ALLOCATECONTIG, - fst_posmode: libc::F_PEOFPOSMODE, - fst_offset: 0, - fst_length: len as libc::off_t, - fst_bytesalloc: 0, - }; - - let ret = unsafe { libc::fcntl(file.as_raw_fd(), libc::F_PREALLOCATE, &fstore) }; - if ret == -1 { - // Unable to allocate contiguous disk space; attempt to allocate non-contiguously. - fstore.fst_flags = libc::F_ALLOCATEALL; - let ret = unsafe { libc::fcntl(file.as_raw_fd(), libc::F_PREALLOCATE, &fstore) }; - if ret == -1 { - return Err(Error::last_os_error()); - } - } - } - - if len > stat.size() as u64 { - file.set_len(len) - } else { - Ok(()) - } -} - -#[cfg(any(target_os = "openbsd", - target_os = "netbsd", - target_os = "dragonfly", - target_os = "solaris"))] -pub fn allocate(file: &File, len: u64) -> Result<()> { - // No file allocation API available, just set the length if necessary. - if len > try!(file.metadata()).len() as u64 { - file.set_len(len) - } else { - Ok(()) - } -} - -fn statvfs

(path: P) -> Result where P: AsRef { - let cstr = match CString::new(path.as_ref().as_os_str().as_bytes()) { - Ok(cstr) => cstr, - Err(..) => return Err(Error::new(ErrorKind::InvalidInput, "path contained a null")), - }; - - unsafe { - let mut stat: libc::statvfs = mem::zeroed(); - // danburkert/fs2-rs#1: cast is necessary for platforms where c_char != u8. - if libc::statvfs(cstr.as_ptr() as *const _, &mut stat) == -1 { - Err(Error::last_os_error()) - } else { - Ok(stat) - } - } -} - -pub fn free_space

(path: P) -> Result where P: AsRef { - statvfs(path).map(|statvfs| statvfs.f_frsize as u64 * statvfs.f_bfree as u64) -} - -pub fn available_space

(path: P) -> Result where P: AsRef { - statvfs(path).map(|statvfs| statvfs.f_frsize as u64 * statvfs.f_bavail as u64) -} - -pub fn total_space

(path: P) -> Result where P: AsRef { - statvfs(path).map(|statvfs| statvfs.f_frsize as u64 * statvfs.f_blocks as u64) -} - -pub fn allocation_granularity

(path: P) -> Result where P: AsRef { - statvfs(path).map(|statvfs| statvfs.f_frsize as u64) -} - -#[cfg(test)] -mod test { - extern crate tempdir; - extern crate libc; - - use std::fs::{self, File}; - use std::os::unix::io::AsRawFd; - - use {FileExt, lock_contended_error}; - - /// The duplicate method returns a file with a new file descriptor. - #[test] - fn duplicate_new_fd() { - let tempdir = tempdir::TempDir::new("fs2").unwrap(); - let path = tempdir.path().join("fs2"); - let file1 = fs::OpenOptions::new().write(true).create(true).open(&path).unwrap(); - let file2 = file1.duplicate().unwrap(); - assert!(file1.as_raw_fd() != file2.as_raw_fd()); - } - - /// The duplicate method should preservesthe close on exec flag. - #[test] - fn duplicate_cloexec() { - - fn flags(file: &File) -> libc::c_int { - unsafe { libc::fcntl(file.as_raw_fd(), libc::F_GETFL, 0) } - } - - let tempdir = tempdir::TempDir::new("fs2").unwrap(); - let path = tempdir.path().join("fs2"); - let file1 = fs::OpenOptions::new().write(true).create(true).open(&path).unwrap(); - let file2 = file1.duplicate().unwrap(); - - assert_eq!(flags(&file1), flags(&file2)); - } - - /// Tests that locking a file descriptor will replace any existing locks - /// held on the file descriptor. - #[test] - fn lock_replace() { - let tempdir = tempdir::TempDir::new("fs2").unwrap(); - let path = tempdir.path().join("fs2"); - let file1 = fs::OpenOptions::new().write(true).create(true).open(&path).unwrap(); - let file2 = fs::OpenOptions::new().write(true).create(true).open(&path).unwrap(); - - // Creating a shared lock will drop an exclusive lock. - file1.lock_exclusive().unwrap(); - file1.lock_shared().unwrap(); - file2.lock_shared().unwrap(); - - // Attempting to replace a shared lock with an exclusive lock will fail - // with multiple lock holders, and remove the original shared lock. 
- assert_eq!(file2.try_lock_exclusive().unwrap_err().raw_os_error(), - lock_contended_error().raw_os_error()); - file1.lock_shared().unwrap(); - } - - /// Tests that locks are shared among duplicated file descriptors. - #[test] - fn lock_duplicate() { - let tempdir = tempdir::TempDir::new("fs2").unwrap(); - let path = tempdir.path().join("fs2"); - let file1 = fs::OpenOptions::new().write(true).create(true).open(&path).unwrap(); - let file2 = file1.duplicate().unwrap(); - let file3 = fs::OpenOptions::new().write(true).create(true).open(&path).unwrap(); - - // Create a lock through fd1, then replace it through fd2. - file1.lock_shared().unwrap(); - file2.lock_exclusive().unwrap(); - assert_eq!(file3.try_lock_shared().unwrap_err().raw_os_error(), - lock_contended_error().raw_os_error()); - - // Either of the file descriptors should be able to unlock. - file1.unlock().unwrap(); - file3.lock_shared().unwrap(); - } -} diff -Nru cargo-0.17.0/vendor/fs2-0.3.0/src/windows.rs cargo-0.19.0/vendor/fs2-0.3.0/src/windows.rs --- cargo-0.17.0/vendor/fs2-0.3.0/src/windows.rs 2017-03-24 16:59:54.000000000 +0000 +++ cargo-0.19.0/vendor/fs2-0.3.0/src/windows.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,292 +0,0 @@ -extern crate kernel32; -extern crate winapi; - -use std::fs::File; -use std::io::{Error, Result}; -use std::mem; -use std::os::windows::ffi::OsStrExt; -use std::os::windows::io::{AsRawHandle, FromRawHandle}; -use std::path::Path; -use std::ptr; - -pub fn duplicate(file: &File) -> Result { - unsafe { - let mut handle = ptr::null_mut(); - let current_process = kernel32::GetCurrentProcess(); - let ret = kernel32::DuplicateHandle(current_process, - file.as_raw_handle(), - current_process, - &mut handle, - 0, - true as winapi::BOOL, - winapi::DUPLICATE_SAME_ACCESS); - if ret == 0 { - Err(Error::last_os_error()) - } else { - Ok(File::from_raw_handle(handle)) - } - } -} - -pub fn allocated_size(file: &File) -> Result { - unsafe { - let mut info: winapi::FILE_STANDARD_INFO = 
mem::zeroed(); - - let ret = kernel32::GetFileInformationByHandleEx( - file.as_raw_handle(), - winapi::FileStandardInfo, - &mut info as *mut _ as *mut _, - mem::size_of::() as winapi::DWORD); - - if ret == 0 { - Err(Error::last_os_error()) - } else { - Ok(info.AllocationSize as u64) - } - } -} - -pub fn allocate(file: &File, len: u64) -> Result<()> { - if try!(allocated_size(file)) < len { - unsafe { - let mut info: winapi::FILE_ALLOCATION_INFO = mem::zeroed(); - info.AllocationSize = len as i64; - let ret = kernel32::SetFileInformationByHandle( - file.as_raw_handle(), - winapi::FileAllocationInfo, - &mut info as *mut _ as *mut _, - mem::size_of::() as winapi::DWORD); - if ret == 0 { - return Err(Error::last_os_error()); - } - } - } - if try!(file.metadata()).len() < len { - file.set_len(len) - } else { - Ok(()) - } -} - -pub fn lock_shared(file: &File) -> Result<()> { - lock_file(file, 0) -} - -pub fn lock_exclusive(file: &File) -> Result<()> { - lock_file(file, winapi::LOCKFILE_EXCLUSIVE_LOCK) -} - -pub fn try_lock_shared(file: &File) -> Result<()> { - lock_file(file, winapi::LOCKFILE_FAIL_IMMEDIATELY) -} - -pub fn try_lock_exclusive(file: &File) -> Result<()> { - lock_file(file, winapi::LOCKFILE_EXCLUSIVE_LOCK | winapi::LOCKFILE_FAIL_IMMEDIATELY) -} - -pub fn unlock(file: &File) -> Result<()> { - unsafe { - let ret = kernel32::UnlockFile(file.as_raw_handle(), 0, 0, !0, !0); - if ret == 0 { Err(Error::last_os_error()) } else { Ok(()) } - } -} - -pub fn lock_error() -> Error { - Error::from_raw_os_error(winapi::ERROR_LOCK_VIOLATION as i32) -} - -fn lock_file(file: &File, flags: winapi::DWORD) -> Result<()> { - unsafe { - let mut overlapped = mem::zeroed(); - let ret = kernel32::LockFileEx(file.as_raw_handle(), flags, 0, !0, !0, &mut overlapped); - if ret == 0 { Err(Error::last_os_error()) } else { Ok(()) } - } -} - -fn volume_path

(path: P, volume_path: &mut [u16]) -> Result<()> where P: AsRef { - let path_utf8: Vec = path.as_ref().as_os_str().encode_wide().chain(Some(0)).collect(); - unsafe { - let ret = kernel32::GetVolumePathNameW(path_utf8.as_ptr(), - volume_path.as_mut_ptr(), - volume_path.len() as winapi::DWORD); - if ret == 0 { Err(Error::last_os_error()) } else { Ok(()) - } - } -} - -fn get_disk_free_space

(path: P) -> Result<(u64, u64, u64, u64)> where P: AsRef { - let root_path: &mut [u16] = &mut [0; 261]; - try!(volume_path(path, root_path)); - unsafe { - - let mut sectors_per_cluster = 0; - let mut bytes_per_sector = 0; - let mut number_of_free_clusters = 0; - let mut total_number_of_clusters = 0; - let ret = kernel32::GetDiskFreeSpaceW(root_path.as_ptr(), - &mut sectors_per_cluster, - &mut bytes_per_sector, - &mut number_of_free_clusters, - &mut total_number_of_clusters); - if ret == 0 { - Err(Error::last_os_error()) - } else { - Ok((sectors_per_cluster as u64, - bytes_per_sector as u64, - number_of_free_clusters as u64, - total_number_of_clusters as u64)) - } - } -} - -pub fn free_space

(path: P) -> Result where P: AsRef { - available_space(path) -} - -pub fn available_space

(path: P) -> Result where P: AsRef { - get_disk_free_space(path).map(|(sectors_per_cluster, - bytes_per_sector, - number_of_free_clusters, - _)| { - number_of_free_clusters * sectors_per_cluster * bytes_per_sector - }) -} - -pub fn total_space

(path: P) -> Result where P: AsRef { - get_disk_free_space(path).map(|(sectors_per_cluster, - bytes_per_sector, - _, - total_number_of_clusters)| { - total_number_of_clusters * sectors_per_cluster * bytes_per_sector - }) -} - -pub fn allocation_granularity

(path: P) -> Result where P: AsRef { - get_disk_free_space(path).map(|(sectors_per_cluster, bytes_per_sector, _, _)| { - sectors_per_cluster * bytes_per_sector - }) -} - -#[cfg(test)] -mod test { - - extern crate tempdir; - - use std::fs; - use std::os::windows::io::AsRawHandle; - - use {FileExt, lock_contended_error}; - - /// The duplicate method returns a file with a new file handle. - #[test] - fn duplicate_new_handle() { - let tempdir = tempdir::TempDir::new("fs2").unwrap(); - let path = tempdir.path().join("fs2"); - let file1 = fs::OpenOptions::new().write(true).create(true).open(&path).unwrap(); - let file2 = file1.duplicate().unwrap(); - assert!(file1.as_raw_handle() != file2.as_raw_handle()); - } - - /// A duplicated file handle does not have access to the original handle's locks. - #[test] - fn lock_duplicate_handle_independence() { - let tempdir = tempdir::TempDir::new("fs2").unwrap(); - let path = tempdir.path().join("fs2"); - let file1 = fs::OpenOptions::new().read(true).write(true).create(true).open(&path).unwrap(); - let file2 = file1.duplicate().unwrap(); - - // Locking the original file handle will block the duplicate file handle from opening a lock. - file1.lock_shared().unwrap(); - assert_eq!(file2.try_lock_exclusive().unwrap_err().raw_os_error(), - lock_contended_error().raw_os_error()); - - // Once the original file handle is unlocked, the duplicate handle can proceed with a lock. - file1.unlock().unwrap(); - file2.lock_exclusive().unwrap(); - } - - /// A file handle may not be exclusively locked multiple times, or exclusively locked and then - /// shared locked. - #[test] - fn lock_non_reentrant() { - let tempdir = tempdir::TempDir::new("fs2").unwrap(); - let path = tempdir.path().join("fs2"); - let file = fs::OpenOptions::new().read(true).write(true).create(true).open(&path).unwrap(); - - // Multiple exclusive locks fails. 
- file.lock_exclusive().unwrap(); - assert_eq!(file.try_lock_exclusive().unwrap_err().raw_os_error(), - lock_contended_error().raw_os_error()); - file.unlock().unwrap(); - - // Shared then Exclusive locks fails. - file.lock_shared().unwrap(); - assert_eq!(file.try_lock_exclusive().unwrap_err().raw_os_error(), - lock_contended_error().raw_os_error()); - } - - /// A file handle can hold an exclusive lock and any number of shared locks, all of which must - /// be unlocked independently. - #[test] - fn lock_layering() { - let tempdir = tempdir::TempDir::new("fs2").unwrap(); - let path = tempdir.path().join("fs2"); - let file = fs::OpenOptions::new().read(true).write(true).create(true).open(&path).unwrap(); - - // Open two shared locks on the file, and then try and fail to open an exclusive lock. - file.lock_exclusive().unwrap(); - file.lock_shared().unwrap(); - file.lock_shared().unwrap(); - assert_eq!(file.try_lock_exclusive().unwrap_err().raw_os_error(), - lock_contended_error().raw_os_error()); - - // Pop one of the shared locks and try again. - file.unlock().unwrap(); - assert_eq!(file.try_lock_exclusive().unwrap_err().raw_os_error(), - lock_contended_error().raw_os_error()); - - // Pop the second shared lock and try again. - file.unlock().unwrap(); - assert_eq!(file.try_lock_exclusive().unwrap_err().raw_os_error(), - lock_contended_error().raw_os_error()); - - // Pop the exclusive lock and finally succeed. - file.unlock().unwrap(); - file.lock_exclusive().unwrap(); - } - - /// A file handle with multiple open locks will have all locks closed on drop. - #[test] - fn lock_layering_cleanup() { - let tempdir = tempdir::TempDir::new("fs2").unwrap(); - let path = tempdir.path().join("fs2"); - let file1 = fs::OpenOptions::new().read(true).write(true).create(true).open(&path).unwrap(); - let file2 = fs::OpenOptions::new().read(true).write(true).create(true).open(&path).unwrap(); - - // Open two shared locks on the file, and then try and fail to open an exclusive lock. 
- file1.lock_shared().unwrap(); - assert_eq!(file2.try_lock_exclusive().unwrap_err().raw_os_error(), - lock_contended_error().raw_os_error()); - - drop(file1); - file2.lock_exclusive().unwrap(); - } - - /// A file handle's locks will not be released until the original handle and all of its - /// duplicates have been closed. This on really smells like a bug in Windows. - #[test] - fn lock_duplicate_cleanup() { - let tempdir = tempdir::TempDir::new("fs2").unwrap(); - let path = tempdir.path().join("fs2"); - let file1 = fs::OpenOptions::new().read(true).write(true).create(true).open(&path).unwrap(); - let file2 = file1.duplicate().unwrap(); - - // Open a lock on the original handle, then close it. - file1.lock_shared().unwrap(); - drop(file1); - - // Attempting to create a lock on the file with the duplicate handle will fail. - assert_eq!(file2.try_lock_exclusive().unwrap_err().raw_os_error(), - lock_contended_error().raw_os_error()); - } -} diff -Nru cargo-0.17.0/vendor/fs2-0.3.0/.travis.yml cargo-0.19.0/vendor/fs2-0.3.0/.travis.yml --- cargo-0.17.0/vendor/fs2-0.3.0/.travis.yml 2017-03-24 16:59:54.000000000 +0000 +++ cargo-0.19.0/vendor/fs2-0.3.0/.travis.yml 1970-01-01 00:00:00.000000000 +0000 @@ -1,16 +0,0 @@ -language: rust - -rust: -- 1.8.0 -- stable -- nightly - -os: - - linux - - osx - -script: - - cargo build --verbose - - if [[ $TRAVIS_RUST_VERSION = nightly* ]]; then - env RUST_BACKTRACE=1 cargo test -v; - fi diff -Nru cargo-0.17.0/vendor/fs2-0.4.1/.appveyor.yml cargo-0.19.0/vendor/fs2-0.4.1/.appveyor.yml --- cargo-0.17.0/vendor/fs2-0.4.1/.appveyor.yml 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/fs2-0.4.1/.appveyor.yml 2017-08-16 09:07:20.000000000 +0000 @@ -0,0 +1,18 @@ +environment: + matrix: + - TARGET: x86_64-pc-windows-msvc + - TARGET: i686-pc-windows-msvc + - TARGET: x86_64-pc-windows-gnu + - TARGET: i686-pc-windows-gnu + +install: + - ps: Start-FileDownload "https://static.rust-lang.org/dist/rust-nightly-${env:TARGET}.exe" -FileName 
"rust-nightly.exe" + - ps: .\rust-nightly.exe /VERYSILENT /NORESTART /DIR="C:\rust" | Out-Null + - ps: $env:PATH="$env:PATH;C:\rust\bin" + +build_script: + - cargo build -v + +test_script: + - SET RUST_BACKTRACE=1 + - cargo test -v diff -Nru cargo-0.17.0/vendor/fs2-0.4.1/.cargo-checksum.json cargo-0.19.0/vendor/fs2-0.4.1/.cargo-checksum.json --- cargo-0.17.0/vendor/fs2-0.4.1/.cargo-checksum.json 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/fs2-0.4.1/.cargo-checksum.json 2017-08-16 09:07:20.000000000 +0000 @@ -0,0 +1 @@ +{"files":{".appveyor.yml":"15c5548159ad6ebcc02960bb6a3269e729e772df2733b7d4c7cc1583c413ae45",".cargo-ok":"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",".gitignore":"f9b1ca6ae27d1c18215265024629a8960c31379f206d9ed20f64e0b2dcf79805",".travis.yml":"5733d01f7cd27cbdd17a46399103e83eca528727e6cad7f355f6748e772ef916","Cargo.toml":"3e891e5c7d0b3faecc455bf0551c9c43beba354a25a251ca4108b605d6d33934","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"7b63ecd5f1902af1b63729947373683c32745c16a10e8e6292e2e2dcd7e90ae0","README.md":"7667acd3dfd050dadccf8b7815435b9108c24c5704944085281beed6a181e220","src/lib.rs":"a960a99addadb842fa806eb2b5414408abb535ed6786eee5955a893259b9d7d5","src/unix.rs":"1d3c808352a4ac18aa6734f692c032287004093ab3f15ce9ee84fd1ab13329fc","src/windows.rs":"5767d923280998e341504f8d2a015b8b0c3f8b2b1188610aa4c1b6a343da5682"},"package":"34edaee07555859dc13ca387e6ae05686bb4d0364c95d649b6dab959511f4baf"} \ No newline at end of file diff -Nru cargo-0.17.0/vendor/fs2-0.4.1/Cargo.toml cargo-0.19.0/vendor/fs2-0.4.1/Cargo.toml --- cargo-0.17.0/vendor/fs2-0.4.1/Cargo.toml 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/fs2-0.4.1/Cargo.toml 2017-08-16 09:07:20.000000000 +0000 @@ -0,0 +1,19 @@ +[package] +name = "fs2" +version = "0.4.1" +authors = ["Dan Burkert "] +license = "MIT/Apache-2.0" +repository = "https://github.com/danburkert/fs2-rs" +documentation = 
"https://docs.rs/fs2" +description = "Cross-platform file locks and file duplication." +keywords = ["file", "file-system", "lock", "duplicate", "flock"] + +[target.'cfg(unix)'.dependencies] +libc = "0.2.2" + +[target.'cfg(windows)'.dependencies] +winapi = "0.2" +kernel32-sys = "0.2" + +[dev-dependencies] +tempdir = "0.3" diff -Nru cargo-0.17.0/vendor/fs2-0.4.1/.gitignore cargo-0.19.0/vendor/fs2-0.4.1/.gitignore --- cargo-0.17.0/vendor/fs2-0.4.1/.gitignore 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/fs2-0.4.1/.gitignore 2017-08-16 09:07:20.000000000 +0000 @@ -0,0 +1,2 @@ +target +Cargo.lock diff -Nru cargo-0.17.0/vendor/fs2-0.4.1/LICENSE-APACHE cargo-0.19.0/vendor/fs2-0.4.1/LICENSE-APACHE --- cargo-0.17.0/vendor/fs2-0.4.1/LICENSE-APACHE 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/fs2-0.4.1/LICENSE-APACHE 2017-08-16 09:07:20.000000000 +0000 @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + +4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + +8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ +Copyright [yyyy] [name of copyright owner] + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff -Nru cargo-0.17.0/vendor/fs2-0.4.1/LICENSE-MIT cargo-0.19.0/vendor/fs2-0.4.1/LICENSE-MIT --- cargo-0.17.0/vendor/fs2-0.4.1/LICENSE-MIT 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/fs2-0.4.1/LICENSE-MIT 2017-08-16 09:07:20.000000000 +0000 @@ -0,0 +1,25 @@ +Copyright (c) 2015 The Rust Project Developers + +Permission is hereby granted, free of charge, to any +person obtaining a copy of this software and associated +documentation files (the "Software"), to deal in the +Software without restriction, including without +limitation the rights to use, copy, modify, merge, +publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software +is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice +shall be included in all copies or substantial portions +of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF +ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED +TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A +PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR +IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. 
diff -Nru cargo-0.17.0/vendor/fs2-0.4.1/README.md cargo-0.19.0/vendor/fs2-0.4.1/README.md --- cargo-0.17.0/vendor/fs2-0.4.1/README.md 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/fs2-0.4.1/README.md 2017-08-16 09:07:20.000000000 +0000 @@ -0,0 +1,50 @@ +# fs2 + +Extended utilities for working with files and filesystems in Rust. `fs2` +requires Rust stable 1.8 or greater. + +[Documentation](https://docs.rs/fs2) + +[![Linux Status](https://travis-ci.org/danburkert/fs2-rs.svg?branch=master)](https://travis-ci.org/danburkert/fs2-rs) +[![Windows Status](https://ci.appveyor.com/api/projects/status/iuvjv1aaaml0rntt/branch/master?svg=true)](https://ci.appveyor.com/project/danburkert/fs2-rs) + +## Features + +- [x] file descriptor duplication. +- [x] file locks. +- [x] file (pre)allocation. +- [x] file allocation information. +- [x] filesystem space usage information. + +## Platforms + +`fs2` should work on any platform supported by +[`libc`](https://github.com/rust-lang-nursery/libc#platforms-and-documentation). + +`fs2` is continuously tested on: + * `x86_64-unknown-linux-gnu` (Linux) + * `i686-unknown-linux-gnu` + * `x86_64-apple-darwin` (OSX) + * `i686-apple-darwin` + * `x86_64-pc-windows-msvc` (Windows) + * `i686-pc-windows-msvc` + * `x86_64-pc-windows-gnu` + * `i686-pc-windows-gnu` + +## Benchmarks + +Simple benchmarks are provided for the methods provided. Many of these +benchmarks use files in a temporary directory. On many modern Linux distros the +default temporary directory, `/tmp`, is mounted on a tempfs filesystem, which +will have different performance characteristics than a disk-backed filesystem. +The temporary directory is configurable at runtime through the environment (see +[`env::temp_dir`](https://doc.rust-lang.org/stable/std/env/fn.temp_dir.html)). + +## License + +`fs2` is primarily distributed under the terms of both the MIT license and the +Apache License (Version 2.0). 
+ +See [LICENSE-APACHE](LICENSE-APACHE), [LICENSE-MIT](LICENSE-MIT) for details. + +Copyright (c) 2015 Dan Burkert. diff -Nru cargo-0.17.0/vendor/fs2-0.4.1/src/lib.rs cargo-0.19.0/vendor/fs2-0.4.1/src/lib.rs --- cargo-0.17.0/vendor/fs2-0.4.1/src/lib.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/fs2-0.4.1/src/lib.rs 2017-08-16 09:07:20.000000000 +0000 @@ -0,0 +1,450 @@ +#![cfg_attr(test, feature(test))] +#![deny(warnings)] + +#[cfg(unix)] +mod unix; +#[cfg(unix)] +use unix as sys; + +#[cfg(windows)] +mod windows; +#[cfg(windows)] +use windows as sys; + +use std::fs::File; +use std::io::{Error, Result}; +use std::path::Path; + +/// Extension trait for `std::fs::File` which provides allocation, duplication and locking methods. +/// +/// ## Notes on File Locks +/// +/// This library provides whole-file locks in both shared (read) and exclusive +/// (read-write) varieties. +/// +/// File locks are a cross-platform hazard since the file lock APIs exposed by +/// operating system kernels vary in subtle and not-so-subtle ways. +/// +/// The API exposed by this library can be safely used across platforms as long +/// as the following rules are followed: +/// +/// * Multiple locks should not be created on an individual `File` instance +/// concurrently. +/// * Duplicated files should not be locked without great care. +/// * Files to be locked should be opened with at least read or write +/// permissions. +/// * File locks may only be relied upon to be advisory. +/// +/// See the tests in `lib.rs` for cross-platform lock behavior that may be +/// relied upon; see the tests in `unix.rs` and `windows.rs` for examples of +/// platform-specific behavior. File locks are implemented with +/// [`flock(2)`](http://man7.org/linux/man-pages/man2/flock.2.html) on Unix and +/// [`LockFile`](https://msdn.microsoft.com/en-us/library/windows/desktop/aa365202(v=vs.85).aspx) +/// on Windows. +pub trait FileExt { + + /// Returns a duplicate instance of the file. 
+ /// + /// The returned file will share the same file position as the original + /// file. + /// + /// # Notes + /// + /// This is implemented with + /// [`dup(2)`](http://man7.org/linux/man-pages/man2/dup.2.html) on Unix and + /// [`DuplicateHandle`](https://msdn.microsoft.com/en-us/library/windows/desktop/ms724251(v=vs.85).aspx) + /// on Windows. + fn duplicate(&self) -> Result; + + /// Returns the amount of physical space allocated for a file. + fn allocated_size(&self) -> Result; + + /// Ensures that at least `len` bytes of disk space are allocated for the + /// file, and the file size is at least `len` bytes. After a successful call + /// to `allocate`, subsequent writes to the file within the specified length + /// are guaranteed not to fail because of lack of disk space. + fn allocate(&self, len: u64) -> Result<()>; + + /// Locks the file for shared usage, blocking if the file is currently + /// locked exclusively. + fn lock_shared(&self) -> Result<()>; + + /// Locks the file for exclusive usage, blocking if the file is currently + /// locked. + fn lock_exclusive(&self) -> Result<()>; + + /// Locks the file for shared usage, or returns a an error if the file is + /// currently locked (see `lock_contended_error`). + fn try_lock_shared(&self) -> Result<()>; + + /// Locks the file for shared usage, or returns a an error if the file is + /// currently locked (see `lock_contended_error`). + fn try_lock_exclusive(&self) -> Result<()>; + + /// Unlocks the file. 
+ fn unlock(&self) -> Result<()>; +} + +impl FileExt for File { + fn duplicate(&self) -> Result { + sys::duplicate(self) + } + fn allocated_size(&self) -> Result { + sys::allocated_size(self) + } + fn allocate(&self, len: u64) -> Result<()> { + sys::allocate(self, len) + } + fn lock_shared(&self) -> Result<()> { + sys::lock_shared(self) + } + fn lock_exclusive(&self) -> Result<()> { + sys::lock_exclusive(self) + } + fn try_lock_shared(&self) -> Result<()> { + sys::try_lock_shared(self) + } + fn try_lock_exclusive(&self) -> Result<()> { + sys::try_lock_exclusive(self) + } + fn unlock(&self) -> Result<()> { + sys::unlock(self) + } +} + +/// Returns the error that a call to a try lock method on a contended file will +/// return. +pub fn lock_contended_error() -> Error { + sys::lock_error() +} + +/// FsStats contains some common stats about a file system. +#[derive(Clone, Debug, PartialEq, Eq, Hash)] +pub struct FsStats { + free_space: u64, + available_space: u64, + total_space: u64, + allocation_granularity: u64, +} + +impl FsStats { + /// Returns the number of free bytes in the file system containing the provided + /// path. + pub fn free_space(&self) -> u64 { + self.free_space + } + + /// Returns the available space in bytes to non-priveleged users in the file + /// system containing the provided path. + pub fn available_space(&self) -> u64 { + self.available_space + } + + /// Returns the total space in bytes in the file system containing the provided + /// path. + pub fn total_space(&self) -> u64 { + self.total_space + } + + /// Returns the filesystem's disk space allocation granularity in bytes. + /// The provided path may be for any file in the filesystem. + /// + /// On Posix, this is equivalent to the filesystem's block size. + /// On Windows, this is equivalent to the filesystem's cluster size. + pub fn allocation_granularity(&self) -> u64 { + self.allocation_granularity + } +} + +/// Get the stats of the file system containing the provided path. 
+pub fn statvfs

(path: P) -> Result where P: AsRef { + sys::statvfs(path.as_ref()) +} + +/// Returns the number of free bytes in the file system containing the provided +/// path. +pub fn free_space

(path: P) -> Result where P: AsRef { + statvfs(path).map(|stat| stat.free_space) +} + +/// Returns the available space in bytes to non-priveleged users in the file +/// system containing the provided path. +pub fn available_space

(path: P) -> Result where P: AsRef { + statvfs(path).map(|stat| stat.available_space) +} + +/// Returns the total space in bytes in the file system containing the provided +/// path. +pub fn total_space

(path: P) -> Result where P: AsRef { + statvfs(path).map(|stat| stat.total_space) +} + +/// Returns the filesystem's disk space allocation granularity in bytes. +/// The provided path may be for any file in the filesystem. +/// +/// On Posix, this is equivalent to the filesystem's block size. +/// On Windows, this is equivalent to the filesystem's cluster size. +pub fn allocation_granularity

(path: P) -> Result where P: AsRef { + statvfs(path).map(|stat| stat.allocation_granularity) +} + +#[cfg(test)] +mod test { + + extern crate tempdir; + extern crate test; + + use std::fs; + use super::*; + use std::io::{Read, Seek, SeekFrom, Write}; + + /// Tests file duplication. + #[test] + fn duplicate() { + let tempdir = tempdir::TempDir::new("fs2").unwrap(); + let path = tempdir.path().join("fs2"); + let mut file1 = + fs::OpenOptions::new().read(true).write(true).create(true).open(&path).unwrap(); + let mut file2 = file1.duplicate().unwrap(); + + // Write into the first file and then drop it. + file1.write_all(b"foo").unwrap(); + drop(file1); + + let mut buf = vec![]; + + // Read from the second file; since the position is shared it will already be at EOF. + file2.read_to_end(&mut buf).unwrap(); + assert_eq!(0, buf.len()); + + // Rewind and read. + file2.seek(SeekFrom::Start(0)).unwrap(); + file2.read_to_end(&mut buf).unwrap(); + assert_eq!(&buf, &b"foo"); + } + + /// Tests shared file lock operations. + #[test] + fn lock_shared() { + let tempdir = tempdir::TempDir::new("fs2").unwrap(); + let path = tempdir.path().join("fs2"); + let file1 = fs::OpenOptions::new().read(true).write(true).create(true).open(&path).unwrap(); + let file2 = fs::OpenOptions::new().read(true).write(true).create(true).open(&path).unwrap(); + let file3 = fs::OpenOptions::new().read(true).write(true).create(true).open(&path).unwrap(); + + // Concurrent shared access is OK, but not shared and exclusive. + file1.lock_shared().unwrap(); + file2.lock_shared().unwrap(); + assert_eq!(file3.try_lock_exclusive().unwrap_err().kind(), + lock_contended_error().kind()); + file1.unlock().unwrap(); + assert_eq!(file3.try_lock_exclusive().unwrap_err().kind(), + lock_contended_error().kind()); + + // Once all shared file locks are dropped, an exclusive lock may be created; + file2.unlock().unwrap(); + file3.lock_exclusive().unwrap(); + } + + /// Tests exclusive file lock operations. 
+ #[test] + fn lock_exclusive() { + let tempdir = tempdir::TempDir::new("fs2").unwrap(); + let path = tempdir.path().join("fs2"); + let file1 = fs::OpenOptions::new().read(true).write(true).create(true).open(&path).unwrap(); + let file2 = fs::OpenOptions::new().read(true).write(true).create(true).open(&path).unwrap(); + + // No other access is possible once an exclusive lock is created. + file1.lock_exclusive().unwrap(); + assert_eq!(file2.try_lock_exclusive().unwrap_err().kind(), + lock_contended_error().kind()); + assert_eq!(file2.try_lock_shared().unwrap_err().kind(), + lock_contended_error().kind()); + + // Once the exclusive lock is dropped, the second file is able to create a lock. + file1.unlock().unwrap(); + file2.lock_exclusive().unwrap(); + } + + /// Tests that a lock is released after the file that owns it is dropped. + #[test] + fn lock_cleanup() { + let tempdir = tempdir::TempDir::new("fs2").unwrap(); + let path = tempdir.path().join("fs2"); + let file1 = fs::OpenOptions::new().read(true).write(true).create(true).open(&path).unwrap(); + let file2 = fs::OpenOptions::new().read(true).write(true).create(true).open(&path).unwrap(); + + file1.lock_exclusive().unwrap(); + assert_eq!(file2.try_lock_shared().unwrap_err().kind(), + lock_contended_error().kind()); + + // Drop file1; the lock should be released. + drop(file1); + file2.lock_shared().unwrap(); + } + + /// Tests file allocation. + #[test] + fn allocate() { + let tempdir = tempdir::TempDir::new("fs2").unwrap(); + let path = tempdir.path().join("fs2"); + let file = fs::OpenOptions::new().write(true).create(true).open(&path).unwrap(); + let blksize = allocation_granularity(&path).unwrap(); + + // New files are created with no allocated size. + assert_eq!(0, file.allocated_size().unwrap()); + assert_eq!(0, file.metadata().unwrap().len()); + + // Allocate space for the file, checking that the allocated size steps + // up by block size, and the file length matches the allocated size. 
+ + file.allocate(2 * blksize - 1).unwrap(); + assert_eq!(2 * blksize, file.allocated_size().unwrap()); + assert_eq!(2 * blksize - 1, file.metadata().unwrap().len()); + + // Truncate the file, checking that the allocated size steps down by + // block size. + + file.set_len(blksize + 1).unwrap(); + assert_eq!(2 * blksize, file.allocated_size().unwrap()); + assert_eq!(blksize + 1, file.metadata().unwrap().len()); + } + + /// Checks filesystem space methods. + #[test] + fn filesystem_space() { + let tempdir = tempdir::TempDir::new("fs2").unwrap(); + let total_space = total_space(&tempdir.path()).unwrap(); + let free_space = free_space(&tempdir.path()).unwrap(); + let available_space = available_space(&tempdir.path()).unwrap(); + + assert!(total_space > free_space); + assert!(total_space > available_space); + assert!(available_space <= free_space); + } + + /// Benchmarks creating and removing a file. This is a baseline benchmark + /// for comparing against the truncate and allocate benchmarks. + #[bench] + fn bench_file_create(b: &mut test::Bencher) { + let tempdir = tempdir::TempDir::new("fs2").unwrap(); + let path = tempdir.path().join("file"); + + b.iter(|| { + fs::OpenOptions::new() + .read(true) + .write(true) + .create(true) + .open(&path) + .unwrap(); + fs::remove_file(&path).unwrap(); + }); + } + + /// Benchmarks creating a file, truncating it to 32MiB, and deleting it. + #[bench] + fn bench_file_truncate(b: &mut test::Bencher) { + let size = 32 * 1024 * 1024; + let tempdir = tempdir::TempDir::new("fs2").unwrap(); + let path = tempdir.path().join("file"); + + b.iter(|| { + let file = fs::OpenOptions::new() + .read(true) + .write(true) + .create(true) + .open(&path) + .unwrap(); + file.set_len(size).unwrap(); + fs::remove_file(&path).unwrap(); + }); + } + + /// Benchmarks creating a file, allocating 32MiB for it, and deleting it. 
+ #[bench] + fn bench_file_allocate(b: &mut test::Bencher) { + let size = 32 * 1024 * 1024; + let tempdir = tempdir::TempDir::new("fs2").unwrap(); + let path = tempdir.path().join("file"); + + b.iter(|| { + let file = fs::OpenOptions::new() + .read(true) + .write(true) + .create(true) + .open(&path) + .unwrap(); + file.allocate(size).unwrap(); + fs::remove_file(&path).unwrap(); + }); + } + + /// Benchmarks creating a file, allocating 32MiB for it, and deleting it. + #[bench] + fn bench_allocated_size(b: &mut test::Bencher) { + let size = 32 * 1024 * 1024; + let tempdir = tempdir::TempDir::new("fs2").unwrap(); + let path = tempdir.path().join("file"); + let file = fs::OpenOptions::new() + .read(true) + .write(true) + .create(true) + .open(&path) + .unwrap(); + file.allocate(size).unwrap(); + + b.iter(|| { + file.allocated_size().unwrap(); + }); + } + + /// Benchmarks duplicating a file descriptor or handle. + #[bench] + fn bench_duplicate(b: &mut test::Bencher) { + let tempdir = tempdir::TempDir::new("fs2").unwrap(); + let path = tempdir.path().join("fs2"); + let file = fs::OpenOptions::new().read(true).write(true).create(true).open(&path).unwrap(); + + b.iter(|| test::black_box(file.duplicate().unwrap())); + } + + /// Benchmarks locking and unlocking a file lock. + #[bench] + fn bench_lock_unlock(b: &mut test::Bencher) { + let tempdir = tempdir::TempDir::new("fs2").unwrap(); + let path = tempdir.path().join("fs2"); + let file = fs::OpenOptions::new().read(true).write(true).create(true).open(&path).unwrap(); + + b.iter(|| { + file.lock_exclusive().unwrap(); + file.unlock().unwrap(); + }); + } + + /// Benchmarks the free space method. + #[bench] + fn bench_free_space(b: &mut test::Bencher) { + let tempdir = tempdir::TempDir::new("fs2").unwrap(); + b.iter(|| { + test::black_box(free_space(&tempdir.path()).unwrap()); + }); + } + + /// Benchmarks the available space method. 
+ #[bench] + fn bench_available_space(b: &mut test::Bencher) { + let tempdir = tempdir::TempDir::new("fs2").unwrap(); + b.iter(|| { + test::black_box(available_space(&tempdir.path()).unwrap()); + }); + } + + /// Benchmarks the total space method. + #[bench] + fn bench_total_space(b: &mut test::Bencher) { + let tempdir = tempdir::TempDir::new("fs2").unwrap(); + b.iter(|| { + test::black_box(total_space(&tempdir.path()).unwrap()); + }); + } +} diff -Nru cargo-0.17.0/vendor/fs2-0.4.1/src/unix.rs cargo-0.19.0/vendor/fs2-0.4.1/src/unix.rs --- cargo-0.17.0/vendor/fs2-0.4.1/src/unix.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/fs2-0.4.1/src/unix.rs 2017-08-16 09:07:20.000000000 +0000 @@ -0,0 +1,210 @@ +extern crate libc; + +use std::ffi::CString; +use std::fs::File; +use std::io::{Error, ErrorKind, Result}; +use std::mem; +use std::os::unix::ffi::OsStrExt; +use std::os::unix::fs::MetadataExt; +use std::os::unix::io::{AsRawFd, FromRawFd}; +use std::path::Path; + +use FsStats; + +pub fn duplicate(file: &File) -> Result { + unsafe { + let fd = libc::dup(file.as_raw_fd()); + + if fd < 0 { + Err(Error::last_os_error()) + } else { + Ok(File::from_raw_fd(fd)) + } + } +} + +pub fn lock_shared(file: &File) -> Result<()> { + flock(file, libc::LOCK_SH) +} + +pub fn lock_exclusive(file: &File) -> Result<()> { + flock(file, libc::LOCK_EX) +} + +pub fn try_lock_shared(file: &File) -> Result<()> { + flock(file, libc::LOCK_SH | libc::LOCK_NB) +} + +pub fn try_lock_exclusive(file: &File) -> Result<()> { + flock(file, libc::LOCK_EX | libc::LOCK_NB) +} + +pub fn unlock(file: &File) -> Result<()> { + flock(file, libc::LOCK_UN) +} + +pub fn lock_error() -> Error { + Error::from_raw_os_error(libc::EWOULDBLOCK) +} + +fn flock(file: &File, flag: libc::c_int) -> Result<()> { + let ret = unsafe { libc::flock(file.as_raw_fd(), flag) }; + if ret < 0 { Err(Error::last_os_error()) } else { Ok(()) } +} + +pub fn allocated_size(file: &File) -> Result { + file.metadata().map(|m| 
m.blocks() as u64 * 512) +} + +#[cfg(any(target_os = "linux", + target_os = "freebsd", + target_os = "android", + target_os = "nacl"))] +pub fn allocate(file: &File, len: u64) -> Result<()> { + let ret = unsafe { libc::posix_fallocate(file.as_raw_fd(), 0, len as libc::off_t) }; + if ret == 0 { Ok(()) } else { Err(Error::last_os_error()) } +} + +#[cfg(any(target_os = "macos", target_os = "ios"))] +pub fn allocate(file: &File, len: u64) -> Result<()> { + let stat = try!(file.metadata()); + + if len > stat.blocks() as u64 * 512 { + let mut fstore = libc::fstore_t { + fst_flags: libc::F_ALLOCATECONTIG, + fst_posmode: libc::F_PEOFPOSMODE, + fst_offset: 0, + fst_length: len as libc::off_t, + fst_bytesalloc: 0, + }; + + let ret = unsafe { libc::fcntl(file.as_raw_fd(), libc::F_PREALLOCATE, &fstore) }; + if ret == -1 { + // Unable to allocate contiguous disk space; attempt to allocate non-contiguously. + fstore.fst_flags = libc::F_ALLOCATEALL; + let ret = unsafe { libc::fcntl(file.as_raw_fd(), libc::F_PREALLOCATE, &fstore) }; + if ret == -1 { + return Err(Error::last_os_error()); + } + } + } + + if len > stat.size() as u64 { + file.set_len(len) + } else { + Ok(()) + } +} + +#[cfg(any(target_os = "openbsd", + target_os = "netbsd", + target_os = "dragonfly", + target_os = "solaris"))] +pub fn allocate(file: &File, len: u64) -> Result<()> { + // No file allocation API available, just set the length if necessary. + if len > try!(file.metadata()).len() as u64 { + file.set_len(len) + } else { + Ok(()) + } +} + +pub fn statvfs(path: &Path) -> Result { + let cstr = match CString::new(path.as_os_str().as_bytes()) { + Ok(cstr) => cstr, + Err(..) => return Err(Error::new(ErrorKind::InvalidInput, "path contained a null")), + }; + + unsafe { + let mut stat: libc::statvfs = mem::zeroed(); + // danburkert/fs2-rs#1: cast is necessary for platforms where c_char != u8. 
+ if libc::statvfs(cstr.as_ptr() as *const _, &mut stat) != 0 { + Err(Error::last_os_error()) + } else { + Ok(FsStats { + free_space: stat.f_frsize as u64 * stat.f_bfree as u64, + available_space: stat.f_frsize as u64 * stat.f_bavail as u64, + total_space: stat.f_frsize as u64 * stat.f_blocks as u64, + allocation_granularity: stat.f_frsize as u64, + }) + } + } +} + +#[cfg(test)] +mod test { + extern crate tempdir; + extern crate libc; + + use std::fs::{self, File}; + use std::os::unix::io::AsRawFd; + + use {FileExt, lock_contended_error}; + + /// The duplicate method returns a file with a new file descriptor. + #[test] + fn duplicate_new_fd() { + let tempdir = tempdir::TempDir::new("fs2").unwrap(); + let path = tempdir.path().join("fs2"); + let file1 = fs::OpenOptions::new().write(true).create(true).open(&path).unwrap(); + let file2 = file1.duplicate().unwrap(); + assert!(file1.as_raw_fd() != file2.as_raw_fd()); + } + + /// The duplicate method should preservesthe close on exec flag. + #[test] + fn duplicate_cloexec() { + + fn flags(file: &File) -> libc::c_int { + unsafe { libc::fcntl(file.as_raw_fd(), libc::F_GETFL, 0) } + } + + let tempdir = tempdir::TempDir::new("fs2").unwrap(); + let path = tempdir.path().join("fs2"); + let file1 = fs::OpenOptions::new().write(true).create(true).open(&path).unwrap(); + let file2 = file1.duplicate().unwrap(); + + assert_eq!(flags(&file1), flags(&file2)); + } + + /// Tests that locking a file descriptor will replace any existing locks + /// held on the file descriptor. + #[test] + fn lock_replace() { + let tempdir = tempdir::TempDir::new("fs2").unwrap(); + let path = tempdir.path().join("fs2"); + let file1 = fs::OpenOptions::new().write(true).create(true).open(&path).unwrap(); + let file2 = fs::OpenOptions::new().write(true).create(true).open(&path).unwrap(); + + // Creating a shared lock will drop an exclusive lock. 
+ file1.lock_exclusive().unwrap(); + file1.lock_shared().unwrap(); + file2.lock_shared().unwrap(); + + // Attempting to replace a shared lock with an exclusive lock will fail + // with multiple lock holders, and remove the original shared lock. + assert_eq!(file2.try_lock_exclusive().unwrap_err().raw_os_error(), + lock_contended_error().raw_os_error()); + file1.lock_shared().unwrap(); + } + + /// Tests that locks are shared among duplicated file descriptors. + #[test] + fn lock_duplicate() { + let tempdir = tempdir::TempDir::new("fs2").unwrap(); + let path = tempdir.path().join("fs2"); + let file1 = fs::OpenOptions::new().write(true).create(true).open(&path).unwrap(); + let file2 = file1.duplicate().unwrap(); + let file3 = fs::OpenOptions::new().write(true).create(true).open(&path).unwrap(); + + // Create a lock through fd1, then replace it through fd2. + file1.lock_shared().unwrap(); + file2.lock_exclusive().unwrap(); + assert_eq!(file3.try_lock_shared().unwrap_err().raw_os_error(), + lock_contended_error().raw_os_error()); + + // Either of the file descriptors should be able to unlock. 
+ file1.unlock().unwrap(); + file3.lock_shared().unwrap(); + } +} diff -Nru cargo-0.17.0/vendor/fs2-0.4.1/src/windows.rs cargo-0.19.0/vendor/fs2-0.4.1/src/windows.rs --- cargo-0.17.0/vendor/fs2-0.4.1/src/windows.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/fs2-0.4.1/src/windows.rs 2017-08-16 09:07:20.000000000 +0000 @@ -0,0 +1,271 @@ +extern crate kernel32; +extern crate winapi; + +use std::fs::File; +use std::io::{Error, Result}; +use std::mem; +use std::os::windows::ffi::OsStrExt; +use std::os::windows::io::{AsRawHandle, FromRawHandle}; +use std::path::Path; +use std::ptr; + +use FsStats; + +pub fn duplicate(file: &File) -> Result { + unsafe { + let mut handle = ptr::null_mut(); + let current_process = kernel32::GetCurrentProcess(); + let ret = kernel32::DuplicateHandle(current_process, + file.as_raw_handle(), + current_process, + &mut handle, + 0, + true as winapi::BOOL, + winapi::DUPLICATE_SAME_ACCESS); + if ret == 0 { + Err(Error::last_os_error()) + } else { + Ok(File::from_raw_handle(handle)) + } + } +} + +pub fn allocated_size(file: &File) -> Result { + unsafe { + let mut info: winapi::FILE_STANDARD_INFO = mem::zeroed(); + + let ret = kernel32::GetFileInformationByHandleEx( + file.as_raw_handle(), + winapi::FileStandardInfo, + &mut info as *mut _ as *mut _, + mem::size_of::() as winapi::DWORD); + + if ret == 0 { + Err(Error::last_os_error()) + } else { + Ok(info.AllocationSize as u64) + } + } +} + +pub fn allocate(file: &File, len: u64) -> Result<()> { + if try!(allocated_size(file)) < len { + unsafe { + let mut info: winapi::FILE_ALLOCATION_INFO = mem::zeroed(); + info.AllocationSize = len as i64; + let ret = kernel32::SetFileInformationByHandle( + file.as_raw_handle(), + winapi::FileAllocationInfo, + &mut info as *mut _ as *mut _, + mem::size_of::() as winapi::DWORD); + if ret == 0 { + return Err(Error::last_os_error()); + } + } + } + if try!(file.metadata()).len() < len { + file.set_len(len) + } else { + Ok(()) + } +} + +pub fn 
lock_shared(file: &File) -> Result<()> { + lock_file(file, 0) +} + +pub fn lock_exclusive(file: &File) -> Result<()> { + lock_file(file, winapi::LOCKFILE_EXCLUSIVE_LOCK) +} + +pub fn try_lock_shared(file: &File) -> Result<()> { + lock_file(file, winapi::LOCKFILE_FAIL_IMMEDIATELY) +} + +pub fn try_lock_exclusive(file: &File) -> Result<()> { + lock_file(file, winapi::LOCKFILE_EXCLUSIVE_LOCK | winapi::LOCKFILE_FAIL_IMMEDIATELY) +} + +pub fn unlock(file: &File) -> Result<()> { + unsafe { + let ret = kernel32::UnlockFile(file.as_raw_handle(), 0, 0, !0, !0); + if ret == 0 { Err(Error::last_os_error()) } else { Ok(()) } + } +} + +pub fn lock_error() -> Error { + Error::from_raw_os_error(winapi::ERROR_LOCK_VIOLATION as i32) +} + +fn lock_file(file: &File, flags: winapi::DWORD) -> Result<()> { + unsafe { + let mut overlapped = mem::zeroed(); + let ret = kernel32::LockFileEx(file.as_raw_handle(), flags, 0, !0, !0, &mut overlapped); + if ret == 0 { Err(Error::last_os_error()) } else { Ok(()) } + } +} + +fn volume_path(path: &Path, volume_path: &mut [u16]) -> Result<()> { + let path_utf8: Vec = path.as_os_str().encode_wide().chain(Some(0)).collect(); + unsafe { + let ret = kernel32::GetVolumePathNameW(path_utf8.as_ptr(), + volume_path.as_mut_ptr(), + volume_path.len() as winapi::DWORD); + if ret == 0 { Err(Error::last_os_error()) } else { Ok(()) + } + } +} + +pub fn statvfs(path: &Path) -> Result { + let root_path: &mut [u16] = &mut [0; 261]; + try!(volume_path(path, root_path)); + unsafe { + + let mut sectors_per_cluster = 0; + let mut bytes_per_sector = 0; + let mut number_of_free_clusters = 0; + let mut total_number_of_clusters = 0; + let ret = kernel32::GetDiskFreeSpaceW(root_path.as_ptr(), + &mut sectors_per_cluster, + &mut bytes_per_sector, + &mut number_of_free_clusters, + &mut total_number_of_clusters); + if ret == 0 { + Err(Error::last_os_error()) + } else { + let bytes_per_cluster = sectors_per_cluster as u64 * bytes_per_sector as u64; + let free_space = 
bytes_per_cluster * number_of_free_clusters as u64; + let total_space = bytes_per_cluster * total_number_of_clusters as u64; + Ok(FsStats { + free_space: free_space, + available_space: free_space, + total_space: total_space, + allocation_granularity: bytes_per_cluster, + }) + } + } +} + +#[cfg(test)] +mod test { + + extern crate tempdir; + + use std::fs; + use std::os::windows::io::AsRawHandle; + + use {FileExt, lock_contended_error}; + + /// The duplicate method returns a file with a new file handle. + #[test] + fn duplicate_new_handle() { + let tempdir = tempdir::TempDir::new("fs2").unwrap(); + let path = tempdir.path().join("fs2"); + let file1 = fs::OpenOptions::new().write(true).create(true).open(&path).unwrap(); + let file2 = file1.duplicate().unwrap(); + assert!(file1.as_raw_handle() != file2.as_raw_handle()); + } + + /// A duplicated file handle does not have access to the original handle's locks. + #[test] + fn lock_duplicate_handle_independence() { + let tempdir = tempdir::TempDir::new("fs2").unwrap(); + let path = tempdir.path().join("fs2"); + let file1 = fs::OpenOptions::new().read(true).write(true).create(true).open(&path).unwrap(); + let file2 = file1.duplicate().unwrap(); + + // Locking the original file handle will block the duplicate file handle from opening a lock. + file1.lock_shared().unwrap(); + assert_eq!(file2.try_lock_exclusive().unwrap_err().raw_os_error(), + lock_contended_error().raw_os_error()); + + // Once the original file handle is unlocked, the duplicate handle can proceed with a lock. + file1.unlock().unwrap(); + file2.lock_exclusive().unwrap(); + } + + /// A file handle may not be exclusively locked multiple times, or exclusively locked and then + /// shared locked. + #[test] + fn lock_non_reentrant() { + let tempdir = tempdir::TempDir::new("fs2").unwrap(); + let path = tempdir.path().join("fs2"); + let file = fs::OpenOptions::new().read(true).write(true).create(true).open(&path).unwrap(); + + // Multiple exclusive locks fails. 
+ file.lock_exclusive().unwrap(); + assert_eq!(file.try_lock_exclusive().unwrap_err().raw_os_error(), + lock_contended_error().raw_os_error()); + file.unlock().unwrap(); + + // Shared then Exclusive locks fails. + file.lock_shared().unwrap(); + assert_eq!(file.try_lock_exclusive().unwrap_err().raw_os_error(), + lock_contended_error().raw_os_error()); + } + + /// A file handle can hold an exclusive lock and any number of shared locks, all of which must + /// be unlocked independently. + #[test] + fn lock_layering() { + let tempdir = tempdir::TempDir::new("fs2").unwrap(); + let path = tempdir.path().join("fs2"); + let file = fs::OpenOptions::new().read(true).write(true).create(true).open(&path).unwrap(); + + // Open two shared locks on the file, and then try and fail to open an exclusive lock. + file.lock_exclusive().unwrap(); + file.lock_shared().unwrap(); + file.lock_shared().unwrap(); + assert_eq!(file.try_lock_exclusive().unwrap_err().raw_os_error(), + lock_contended_error().raw_os_error()); + + // Pop one of the shared locks and try again. + file.unlock().unwrap(); + assert_eq!(file.try_lock_exclusive().unwrap_err().raw_os_error(), + lock_contended_error().raw_os_error()); + + // Pop the second shared lock and try again. + file.unlock().unwrap(); + assert_eq!(file.try_lock_exclusive().unwrap_err().raw_os_error(), + lock_contended_error().raw_os_error()); + + // Pop the exclusive lock and finally succeed. + file.unlock().unwrap(); + file.lock_exclusive().unwrap(); + } + + /// A file handle with multiple open locks will have all locks closed on drop. + #[test] + fn lock_layering_cleanup() { + let tempdir = tempdir::TempDir::new("fs2").unwrap(); + let path = tempdir.path().join("fs2"); + let file1 = fs::OpenOptions::new().read(true).write(true).create(true).open(&path).unwrap(); + let file2 = fs::OpenOptions::new().read(true).write(true).create(true).open(&path).unwrap(); + + // Open two shared locks on the file, and then try and fail to open an exclusive lock. 
+ file1.lock_shared().unwrap(); + assert_eq!(file2.try_lock_exclusive().unwrap_err().raw_os_error(), + lock_contended_error().raw_os_error()); + + drop(file1); + file2.lock_exclusive().unwrap(); + } + + /// A file handle's locks will not be released until the original handle and all of its + /// duplicates have been closed. This on really smells like a bug in Windows. + #[test] + fn lock_duplicate_cleanup() { + let tempdir = tempdir::TempDir::new("fs2").unwrap(); + let path = tempdir.path().join("fs2"); + let file1 = fs::OpenOptions::new().read(true).write(true).create(true).open(&path).unwrap(); + let file2 = file1.duplicate().unwrap(); + + // Open a lock on the original handle, then close it. + file1.lock_shared().unwrap(); + drop(file1); + + // Attempting to create a lock on the file with the duplicate handle will fail. + assert_eq!(file2.try_lock_exclusive().unwrap_err().raw_os_error(), + lock_contended_error().raw_os_error()); + } +} diff -Nru cargo-0.17.0/vendor/fs2-0.4.1/.travis.yml cargo-0.19.0/vendor/fs2-0.4.1/.travis.yml --- cargo-0.17.0/vendor/fs2-0.4.1/.travis.yml 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/fs2-0.4.1/.travis.yml 2017-08-16 09:07:20.000000000 +0000 @@ -0,0 +1,21 @@ +language: rust + +rust: +- 1.8.0 +- stable +- nightly + +os: + - linux + - osx + +env: + matrix: + - ARCH=x86_64 + - ARCH=i686 + +script: + - cargo build --verbose + - if [[ $TRAVIS_RUST_VERSION = nightly* ]]; then + env RUST_BACKTRACE=1 cargo test -v; + fi diff -Nru cargo-0.17.0/vendor/gcc-0.3.39/appveyor.yml cargo-0.19.0/vendor/gcc-0.3.39/appveyor.yml --- cargo-0.17.0/vendor/gcc-0.3.39/appveyor.yml 2017-03-24 16:59:54.000000000 +0000 +++ cargo-0.19.0/vendor/gcc-0.3.39/appveyor.yml 1970-01-01 00:00:00.000000000 +0000 @@ -1,35 +0,0 @@ -environment: - matrix: - - TARGET: x86_64-pc-windows-msvc - ARCH: amd64 - VS: C:\Program Files (x86)\Microsoft Visual Studio 12.0\VC\vcvarsall.bat - - TARGET: x86_64-pc-windows-msvc - ARCH: amd64 - VS: C:\Program Files 
(x86)\Microsoft Visual Studio 14.0\VC\vcvarsall.bat - - TARGET: i686-pc-windows-msvc - ARCH: x86 - VS: C:\Program Files (x86)\Microsoft Visual Studio 12.0\VC\vcvarsall.bat - - TARGET: i686-pc-windows-msvc - ARCH: x86 - VS: C:\Program Files (x86)\Microsoft Visual Studio 14.0\VC\vcvarsall.bat - - TARGET: x86_64-pc-windows-gnu - MSYS_BITS: 64 - - TARGET: i686-pc-windows-gnu - MSYS_BITS: 32 -install: - - ps: Start-FileDownload "https://static.rust-lang.org/dist/rust-nightly-${env:TARGET}.exe" - - rust-nightly-%TARGET%.exe /VERYSILENT /NORESTART /DIR="C:\Program Files (x86)\Rust" - - if defined VS call "%VS%" %ARCH% - - set PATH=%PATH%;C:\Program Files (x86)\Rust\bin - - if defined MSYS_BITS set PATH=%PATH%;C:\msys64\mingw%MSYS_BITS%\bin - - rustc -V - - cargo -V - -build: false - -test_script: - - cargo test --target %TARGET% - - cargo test --features parallel --target %TARGET% - - cargo test --manifest-path gcc-test/Cargo.toml --target %TARGET% - - cargo test --manifest-path gcc-test/Cargo.toml --features parallel --target %TARGET% - - cargo test --manifest-path gcc-test/Cargo.toml --release --target %TARGET% diff -Nru cargo-0.17.0/vendor/gcc-0.3.39/.cargo-checksum.json cargo-0.19.0/vendor/gcc-0.3.39/.cargo-checksum.json --- cargo-0.17.0/vendor/gcc-0.3.39/.cargo-checksum.json 2017-03-24 16:59:54.000000000 +0000 +++ cargo-0.19.0/vendor/gcc-0.3.39/.cargo-checksum.json 1970-01-01 00:00:00.000000000 +0000 @@ -1 +0,0 @@ 
-{"files":{".cargo-ok":"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",".gitignore":"f9b1ca6ae27d1c18215265024629a8960c31379f206d9ed20f64e0b2dcf79805",".travis.yml":"675ffe583db77282d010306f29e6d81e5070ab081deddd0300137dfbd2cb83de","Cargo.toml":"248470bdc3bee25bd85206afeb95a4789413deac10975cfc8c553f438011d855","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"378f5840b258e2779c39418f3f2d7b2ba96f1c7917dd6be0713f88305dbda397","README.md":"ecb2d93f4c81edbd48d8742ff7887dc0a4530a5890967839090bbc972d49bebe","appveyor.yml":"46c77d913eaa45871296942c2cd96ef092c9dcaf19201cb5c500a5107faeb06f","src/bin/gcc-shim.rs":"11edfe1fc6f932bd42ffffda5145833302bc163e0b87dc0d54f4bd0997ad4708","src/lib.rs":"7d6d231e23f967b1cd05b016d99823a0f6d2bd4d632cedf506c493d7cc9824b3","src/registry.rs":"3e2a42581ebb82e325dd5600c6571cef937b35003b2927dc618967f5238a2058","src/windows_registry.rs":"1f4211caec5a192b5f05c8a47efb27aa6a0ab976c659b9318a0cf603a28d6746","tests/cc_env.rs":"d92c5e3d3d43ac244e63b2cd2c93a521fcf124bf1ccf8d4c6bfa7f8333d88976","tests/support/mod.rs":"f4dad5a8133c3dd6678d9a3de057b82e624ef547b9b3e4ac9508a48962fc387b","tests/test.rs":"164220f11be2eebc20315826513999970660a82feff8cc4b15b4e9d73d98324e"},"package":"771e4a97ff6f237cf0f7d5f5102f6e28bb9743814b6198d684da5c58b76c11e0"} \ No newline at end of file diff -Nru cargo-0.17.0/vendor/gcc-0.3.39/Cargo.toml cargo-0.19.0/vendor/gcc-0.3.39/Cargo.toml --- cargo-0.17.0/vendor/gcc-0.3.39/Cargo.toml 2017-03-24 16:59:54.000000000 +0000 +++ cargo-0.19.0/vendor/gcc-0.3.39/Cargo.toml 1970-01-01 00:00:00.000000000 +0000 @@ -1,23 +0,0 @@ -[package] - -name = "gcc" -version = "0.3.39" -authors = ["Alex Crichton "] -license = "MIT/Apache-2.0" -repository = "https://github.com/alexcrichton/gcc-rs" -documentation = "http://alexcrichton.com/gcc-rs" -description = """ -A build-time dependency for Cargo build scripts to assist in invoking the native -C compiler to compile native C code into a 
static archive to be linked into Rust -code. -""" -keywords = ["build-dependencies"] - -[dependencies] -rayon = { version = "0.4", optional = true } - -[features] -parallel = ["rayon"] - -[dev-dependencies] -tempdir = "0.3" diff -Nru cargo-0.17.0/vendor/gcc-0.3.39/.gitignore cargo-0.19.0/vendor/gcc-0.3.39/.gitignore --- cargo-0.17.0/vendor/gcc-0.3.39/.gitignore 2017-03-24 16:59:54.000000000 +0000 +++ cargo-0.19.0/vendor/gcc-0.3.39/.gitignore 1970-01-01 00:00:00.000000000 +0000 @@ -1,2 +0,0 @@ -target -Cargo.lock diff -Nru cargo-0.17.0/vendor/gcc-0.3.39/LICENSE-APACHE cargo-0.19.0/vendor/gcc-0.3.39/LICENSE-APACHE --- cargo-0.17.0/vendor/gcc-0.3.39/LICENSE-APACHE 2017-03-24 16:59:54.000000000 +0000 +++ cargo-0.19.0/vendor/gcc-0.3.39/LICENSE-APACHE 1970-01-01 00:00:00.000000000 +0000 @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - -1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. 
- - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." 
- - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - -2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - -3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - -4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. 
- - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - -5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - -6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - -7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - -8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - -9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - -END OF TERMS AND CONDITIONS - -APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- -Copyright [yyyy] [name of copyright owner] - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. diff -Nru cargo-0.17.0/vendor/gcc-0.3.39/LICENSE-MIT cargo-0.19.0/vendor/gcc-0.3.39/LICENSE-MIT --- cargo-0.17.0/vendor/gcc-0.3.39/LICENSE-MIT 2017-03-24 16:59:54.000000000 +0000 +++ cargo-0.19.0/vendor/gcc-0.3.39/LICENSE-MIT 1970-01-01 00:00:00.000000000 +0000 @@ -1,25 +0,0 @@ -Copyright (c) 2014 Alex Crichton - -Permission is hereby granted, free of charge, to any -person obtaining a copy of this software and associated -documentation files (the "Software"), to deal in the -Software without restriction, including without -limitation the rights to use, copy, modify, merge, -publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software -is furnished to do so, subject to the following -conditions: - -The above copyright notice and this permission notice -shall be included in all copies or substantial portions -of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -DEALINGS IN THE SOFTWARE. 
diff -Nru cargo-0.17.0/vendor/gcc-0.3.39/README.md cargo-0.19.0/vendor/gcc-0.3.39/README.md --- cargo-0.17.0/vendor/gcc-0.3.39/README.md 2017-03-24 16:59:54.000000000 +0000 +++ cargo-0.19.0/vendor/gcc-0.3.39/README.md 1970-01-01 00:00:00.000000000 +0000 @@ -1,161 +0,0 @@ -# gcc-rs - -A library to compile C/C++ code into a Rust library/application. - -[![Build Status](https://travis-ci.org/alexcrichton/gcc-rs.svg?branch=master)](https://travis-ci.org/alexcrichton/gcc-rs) -[![Build status](https://ci.appveyor.com/api/projects/status/onu270iw98h81nwv?svg=true)](https://ci.appveyor.com/project/alexcrichton/gcc-rs) - -[Documentation](http://alexcrichton.com/gcc-rs) - -A simple library meant to be used as a build dependency with Cargo packages in -order to build a set of C/C++ files into a static archive. Note that while this -crate is called "gcc", it actually calls out to the most relevant compile for -a platform, for example using `cl` on MSVC. That is, this crate does indeed work -on MSVC! - -## Using gcc-rs - -First, you'll want to both add a build script for your crate (`build.rs`) and -also add this crate to your `Cargo.toml` via: - -```toml -[package] -# ... -build = "build.rs" - -[build-dependencies] -gcc = "0.3" -``` - -Next up, you'll want to write a build script like so: - -```rust,no_run -// build.rs - -extern crate gcc; - -fn main() { - gcc::compile_library("libfoo.a", &["foo.c", "bar.c"]); -} -``` - -And that's it! Running `cargo build` should take care of the rest and your Rust -application will now have the C files `foo.c` and `bar.c` compiled into it. You -can call the functions in Rust by declaring functions in your Rust code like so: - -``` -extern { - fn foo_function(); - fn bar_function(); -} - -pub fn call() { - unsafe { - foo_function(); - bar_function(); - } -} - -fn main() { - // ... 
-} -``` - -## External configuration via environment variables - -To control the programs and flags used for building, the builder can set a -number of different environment variables. - -* `CFLAGS` - a series of space separated flags passed to "gcc". Note that - individual flags cannot currently contain spaces, so doing - something like: "-L=foo\ bar" is not possible. -* `CC` - the actual C compiler used. Note that this is used as an exact - executable name, so (for example) no extra flags can be passed inside - this variable, and the builder must ensure that there aren't any - trailing spaces. This compiler must understand the `-c` flag. For - certain `TARGET`s, it also is assumed to know about other flags (most - common is `-fPIC`). -* `AR` - the `ar` (archiver) executable to use to build the static library. - -Each of these variables can also be supplied with certain prefixes and suffixes, -in the following prioritized order: - -1. `_` - for example, `CC_x86_64-unknown-linux-gnu` -2. `_` - for example, `CC_x86_64_unknown_linux_gnu` -3. `_` - for example, `HOST_CC` or `TARGET_CFLAGS` -4. `` - a plain `CC`, `AR` as above. - -If none of these variables exist, gcc-rs uses built-in defaults - -In addition to the the above optional environment variables, `gcc-rs` has some -functions with hard requirements on some variables supplied by [cargo's -build-script driver][cargo] that it has the `TARGET`, `OUT_DIR`, `OPT_LEVEL`, -and `HOST` variables. - -[cargo]: http://doc.crates.io/build-script.html#inputs-to-the-build-script - -## Optional features - -Currently gcc-rs supports parallel compilation (think `make -jN`) but this -feature is turned off by default. To enable gcc-rs to compile C/C++ in parallel, -you can change your dependency to: - -```toml -[build-dependencies] -gcc = { version = "0.3", features = ["parallel"] } -``` - -By default gcc-rs will limit parallelism to `$NUM_JOBS`, or if not present it -will limit it to the number of cpus on the machine. 
- -## Compile-time Requirements - -To work properly this crate needs access to a C compiler when the build script -is being run. This crate does not ship a C compiler with it. The compiler -required varies per platform, but there are three broad categories: - -* Unix platforms require `cc` to be the C compiler. This can be found by - installing gcc/clang on Linux distributions and Xcode on OSX, for example. -* Windows platforms targeting MSVC (e.g. your target triple ends in `-msvc`) - require `cl.exe` to be available and in `PATH`. This is typically found in - standard Visual Studio installations and the `PATH` can be set up by running - the appropriate developer tools shell. -* Windows platforms targeting MinGW (e.g. your target triple ends in `-gnu`) - require `gcc` to be available in `PATH`. We recommend the - [MinGW-w64](http://mingw-w64.org) distribution, which is using the - [Win-builds](http://win-builds.org) installation system. - You may also acquire it via - [MSYS2](http://msys2.github.io), as explained [here][msys2-help]. Make sure - to install the appropriate architecture corresponding to your installation of - rustc. GCC from older [MinGW](http://www.mingw.org) project is compatible - only with 32-bit rust compiler. - -[msys2-help]: http://github.com/rust-lang/rust#building-on-windows - -## C++ support - -`gcc-rs` supports C++ libraries compilation by using the `cpp` method on -`Config`: - -```rust,no_run -extern crate gcc; - -fn main() { - gcc::Config::new() - .cpp(true) // Switch to C++ library compilation. - .file("foo.cpp") - .compile("libfoo.a"); -} -``` - -When using C++ library compilation switch, the `CXX` and `CXXFLAGS` env -variables are used instead of `CC` and `CFLAGS` and the C++ standard library is -linked to the crate target. - -## License - -`gcc-rs` is primarily distributed under the terms of both the MIT license and -the Apache License (Version 2.0), with portions covered by various BSD-like -licenses. 
- -See LICENSE-APACHE, and LICENSE-MIT for details. diff -Nru cargo-0.17.0/vendor/gcc-0.3.39/src/bin/gcc-shim.rs cargo-0.19.0/vendor/gcc-0.3.39/src/bin/gcc-shim.rs --- cargo-0.17.0/vendor/gcc-0.3.39/src/bin/gcc-shim.rs 2017-03-24 16:59:54.000000000 +0000 +++ cargo-0.19.0/vendor/gcc-0.3.39/src/bin/gcc-shim.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,23 +0,0 @@ -#![cfg_attr(test, allow(dead_code))] - -use std::env; -use std::fs::File; -use std::io::prelude::*; -use std::path::PathBuf; - -fn main() { - let out_dir = PathBuf::from(env::var_os("GCCTEST_OUT_DIR").unwrap()); - for i in 0.. { - let candidate = out_dir.join(format!("out{}", i)); - if candidate.exists() { - continue - } - let mut f = File::create(candidate).unwrap(); - for arg in env::args().skip(1) { - writeln!(f, "{}", arg).unwrap(); - } - - File::create(out_dir.join("libfoo.a")).unwrap(); - break - } -} diff -Nru cargo-0.17.0/vendor/gcc-0.3.39/src/lib.rs cargo-0.19.0/vendor/gcc-0.3.39/src/lib.rs --- cargo-0.17.0/vendor/gcc-0.3.39/src/lib.rs 2017-03-24 16:59:54.000000000 +0000 +++ cargo-0.19.0/vendor/gcc-0.3.39/src/lib.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,985 +0,0 @@ -//! A library for build scripts to compile custom C code -//! -//! This library is intended to be used as a `build-dependencies` entry in -//! `Cargo.toml`: -//! -//! ```toml -//! [build-dependencies] -//! gcc = "0.3" -//! ``` -//! -//! The purpose of this crate is to provide the utility functions necessary to -//! compile C code into a static archive which is then linked into a Rust crate. -//! The top-level `compile_library` function serves as a convenience and more -//! advanced configuration is available through the `Config` builder. -//! -//! This crate will automatically detect situations such as cross compilation or -//! other environment variables set by Cargo and will build code appropriately. -//! -//! # Examples -//! -//! Use the default configuration: -//! -//! ```no_run -//! extern crate gcc; -//! -//! fn main() { -//! 
gcc::compile_library("libfoo.a", &["src/foo.c"]); -//! } -//! ``` -//! -//! Use more advanced configuration: -//! -//! ```no_run -//! extern crate gcc; -//! -//! fn main() { -//! gcc::Config::new() -//! .file("src/foo.c") -//! .define("FOO", Some("bar")) -//! .include("src") -//! .compile("libfoo.a"); -//! } -//! ``` - -#![doc(html_root_url = "http://alexcrichton.com/gcc-rs")] -#![cfg_attr(test, deny(warnings))] -#![deny(missing_docs)] - -#[cfg(feature = "parallel")] -extern crate rayon; - -use std::env; -use std::ffi::{OsString, OsStr}; -use std::fs; -use std::io; -use std::path::{PathBuf, Path}; -use std::process::{Command, Stdio}; -use std::io::{BufReader, BufRead, Write}; - -#[cfg(windows)] -mod registry; -pub mod windows_registry; - -/// Extra configuration to pass to gcc. -pub struct Config { - include_directories: Vec, - definitions: Vec<(String, Option)>, - objects: Vec, - flags: Vec, - files: Vec, - cpp: bool, - cpp_link_stdlib: Option>, - cpp_set_stdlib: Option, - target: Option, - host: Option, - out_dir: Option, - opt_level: Option, - debug: Option, - env: Vec<(OsString, OsString)>, - compiler: Option, - archiver: Option, - cargo_metadata: bool, - pic: Option, -} - -/// Configuration used to represent an invocation of a C compiler. -/// -/// This can be used to figure out what compiler is in use, what the arguments -/// to it are, and what the environment variables look like for the compiler. -/// This can be used to further configure other build systems (e.g. forward -/// along CC and/or CFLAGS) or the `to_command` method can be used to run the -/// compiler itself. -pub struct Tool { - path: PathBuf, - args: Vec, - env: Vec<(OsString, OsString)>, -} - -/// Compile a library from the given set of input C files. -/// -/// This will simply compile all files into object files and then assemble them -/// into the output. This will read the standard environment variables to detect -/// cross compilations and such. 
-/// -/// This function will also print all metadata on standard output for Cargo. -/// -/// # Example -/// -/// ```no_run -/// gcc::compile_library("libfoo.a", &["foo.c", "bar.c"]); -/// ``` -pub fn compile_library(output: &str, files: &[&str]) { - let mut c = Config::new(); - for f in files.iter() { - c.file(*f); - } - c.compile(output) -} - -impl Config { - /// Construct a new instance of a blank set of configuration. - /// - /// This builder is finished with the `compile` function. - pub fn new() -> Config { - Config { - include_directories: Vec::new(), - definitions: Vec::new(), - objects: Vec::new(), - flags: Vec::new(), - files: Vec::new(), - cpp: false, - cpp_link_stdlib: None, - cpp_set_stdlib: None, - target: None, - host: None, - out_dir: None, - opt_level: None, - debug: None, - env: Vec::new(), - compiler: None, - archiver: None, - cargo_metadata: true, - pic: None, - } - } - - /// Add a directory to the `-I` or include path for headers - pub fn include>(&mut self, dir: P) -> &mut Config { - self.include_directories.push(dir.as_ref().to_path_buf()); - self - } - - /// Specify a `-D` variable with an optional value. - pub fn define(&mut self, var: &str, val: Option<&str>) -> &mut Config { - self.definitions.push((var.to_string(), val.map(|s| s.to_string()))); - self - } - - /// Add an arbitrary object file to link in - pub fn object>(&mut self, obj: P) -> &mut Config { - self.objects.push(obj.as_ref().to_path_buf()); - self - } - - /// Add an arbitrary flag to the invocation of the compiler - pub fn flag(&mut self, flag: &str) -> &mut Config { - self.flags.push(flag.to_string()); - self - } - - /// Add a file which will be compiled - pub fn file>(&mut self, p: P) -> &mut Config { - self.files.push(p.as_ref().to_path_buf()); - self - } - - /// Set C++ support. - /// - /// The other `cpp_*` options will only become active if this is set to - /// `true`. 
- pub fn cpp(&mut self, cpp: bool) -> &mut Config { - self.cpp = cpp; - self - } - - /// Set the standard library to link against when compiling with C++ - /// support. - /// - /// The default value of this property depends on the current target: On - /// OS X `Some("c++")` is used, when compiling for a Visual Studio based - /// target `None` is used and for other targets `Some("stdc++")` is used. - /// - /// A value of `None` indicates that no automatic linking should happen, - /// otherwise cargo will link against the specified library. - /// - /// The given library name must not contain the `lib` prefix. - pub fn cpp_link_stdlib(&mut self, cpp_link_stdlib: Option<&str>) - -> &mut Config { - self.cpp_link_stdlib = Some(cpp_link_stdlib.map(|s| s.into())); - self - } - - /// Force the C++ compiler to use the specified standard library. - /// - /// Setting this option will automatically set `cpp_link_stdlib` to the same - /// value. - /// - /// The default value of this option is always `None`. - /// - /// This option has no effect when compiling for a Visual Studio based - /// target. - /// - /// This option sets the `-stdlib` flag, which is only supported by some - /// compilers (clang, icc) but not by others (gcc). The library will not - /// detect which compiler is used, as such it is the responsibility of the - /// caller to ensure that this option is only used in conjuction with a - /// compiler which supports the `-stdlib` flag. - /// - /// A value of `None` indicates that no specific C++ standard library should - /// be used, otherwise `-stdlib` is added to the compile invocation. - /// - /// The given library name must not contain the `lib` prefix. - pub fn cpp_set_stdlib(&mut self, cpp_set_stdlib: Option<&str>) - -> &mut Config { - self.cpp_set_stdlib = cpp_set_stdlib.map(|s| s.into()); - self.cpp_link_stdlib(cpp_set_stdlib); - self - } - - /// Configures the target this configuration will be compiling for. 
- /// - /// This option is automatically scraped from the `TARGET` environment - /// variable by build scripts, so it's not required to call this function. - pub fn target(&mut self, target: &str) -> &mut Config { - self.target = Some(target.to_string()); - self - } - - /// Configures the host assumed by this configuration. - /// - /// This option is automatically scraped from the `HOST` environment - /// variable by build scripts, so it's not required to call this function. - pub fn host(&mut self, host: &str) -> &mut Config { - self.host = Some(host.to_string()); - self - } - - /// Configures the optimization level of the generated object files. - /// - /// This option is automatically scraped from the `OPT_LEVEL` environment - /// variable by build scripts, so it's not required to call this function. - pub fn opt_level(&mut self, opt_level: u32) -> &mut Config { - self.opt_level = Some(opt_level.to_string()); - self - } - - /// Configures the optimization level of the generated object files. - /// - /// This option is automatically scraped from the `OPT_LEVEL` environment - /// variable by build scripts, so it's not required to call this function. - pub fn opt_level_str(&mut self, opt_level: &str) -> &mut Config { - self.opt_level = Some(opt_level.to_string()); - self - } - - /// Configures whether the compiler will emit debug information when - /// generating object files. - /// - /// This option is automatically scraped from the `PROFILE` environment - /// variable by build scripts (only enabled when the profile is "debug"), so - /// it's not required to call this function. - pub fn debug(&mut self, debug: bool) -> &mut Config { - self.debug = Some(debug); - self - } - - /// Configures the output directory where all object files and static - /// libraries will be located. - /// - /// This option is automatically scraped from the `OUT_DIR` environment - /// variable by build scripts, so it's not required to call this function. 
- pub fn out_dir>(&mut self, out_dir: P) -> &mut Config { - self.out_dir = Some(out_dir.as_ref().to_owned()); - self - } - - /// Configures the compiler to be used to produce output. - /// - /// This option is automatically determined from the target platform or a - /// number of environment variables, so it's not required to call this - /// function. - pub fn compiler>(&mut self, compiler: P) -> &mut Config { - self.compiler = Some(compiler.as_ref().to_owned()); - self - } - - /// Configures the tool used to assemble archives. - /// - /// This option is automatically determined from the target platform or a - /// number of environment variables, so it's not required to call this - /// function. - pub fn archiver>(&mut self, archiver: P) -> &mut Config { - self.archiver = Some(archiver.as_ref().to_owned()); - self - } - /// Define whether metadata should be emitted for cargo allowing it to - /// automatically link the binary. Defaults to `true`. - pub fn cargo_metadata(&mut self, cargo_metadata: bool) -> &mut Config { - self.cargo_metadata = cargo_metadata; - self - } - - /// Configures whether the compiler will emit position independent code. - /// - /// This option defaults to `false` for `i686` and `windows-gnu` targets and to `true` for all - /// other targets. 
- pub fn pic(&mut self, pic: bool) -> &mut Config { - self.pic = Some(pic); - self - } - - - #[doc(hidden)] - pub fn __set_env(&mut self, a: A, b: B) -> &mut Config - where A: AsRef, B: AsRef - { - self.env.push((a.as_ref().to_owned(), b.as_ref().to_owned())); - self - } - - /// Run the compiler, generating the file `output` - /// - /// The name `output` must begin with `lib` and end with `.a` - pub fn compile(&self, output: &str) { - assert!(output.starts_with("lib")); - assert!(output.ends_with(".a")); - let lib_name = &output[3..output.len() - 2]; - let dst = self.get_out_dir(); - - let mut objects = Vec::new(); - let mut src_dst = Vec::new(); - for file in self.files.iter() { - let obj = dst.join(file).with_extension("o"); - let obj = if !obj.starts_with(&dst) { - dst.join(obj.file_name().unwrap()) - } else { - obj - }; - fs::create_dir_all(&obj.parent().unwrap()).unwrap(); - src_dst.push((file.to_path_buf(), obj.clone())); - objects.push(obj); - } - self.compile_objects(&src_dst); - self.assemble(lib_name, &dst.join(output), &objects); - - if self.get_target().contains("msvc") { - let compiler = self.get_base_compiler(); - let atlmfc_lib = compiler.env().iter().find(|&&(ref var, _)| { - var == OsStr::new("LIB") - }).and_then(|&(_, ref lib_paths)| { - env::split_paths(lib_paths).find(|path| { - let sub = Path::new("atlmfc/lib"); - path.ends_with(sub) || path.parent().map_or(false, |p| p.ends_with(sub)) - }) - }); - - if let Some(atlmfc_lib) = atlmfc_lib { - self.print(&format!("cargo:rustc-link-search=native={}", - atlmfc_lib.display())); - } - } - - self.print(&format!("cargo:rustc-link-lib=static={}", - &output[3..output.len() - 2])); - self.print(&format!("cargo:rustc-link-search=native={}", dst.display())); - - // Add specific C++ libraries, if enabled. 
- if self.cpp { - if let Some(stdlib) = self.get_cpp_link_stdlib() { - self.print(&format!("cargo:rustc-link-lib={}", stdlib)); - } - } - } - - #[cfg(feature = "parallel")] - fn compile_objects(&self, objs: &[(PathBuf, PathBuf)]) { - use self::rayon::prelude::*; - - let mut cfg = rayon::Configuration::new(); - if let Ok(amt) = env::var("NUM_JOBS") { - if let Ok(amt) = amt.parse() { - cfg = cfg.set_num_threads(amt); - } - } - drop(rayon::initialize(cfg)); - - objs.par_iter().weight_max().for_each(|&(ref src, ref dst)| { - self.compile_object(src, dst) - }) - } - - #[cfg(not(feature = "parallel"))] - fn compile_objects(&self, objs: &[(PathBuf, PathBuf)]) { - for &(ref src, ref dst) in objs { - self.compile_object(src, dst); - } - } - - fn compile_object(&self, file: &Path, dst: &Path) { - let is_asm = file.extension().and_then(|s| s.to_str()) == Some("asm"); - let msvc = self.get_target().contains("msvc"); - let (mut cmd, name) = if msvc && is_asm { - self.msvc_macro_assembler() - } else { - let compiler = self.get_compiler(); - let mut cmd = compiler.to_command(); - for &(ref a, ref b) in self.env.iter() { - cmd.env(a, b); - } - (cmd, compiler.path.file_name().unwrap() - .to_string_lossy().into_owned()) - }; - if msvc && is_asm { - cmd.arg("/Fo").arg(dst); - } else if msvc { - let mut s = OsString::from("/Fo"); - s.push(&dst); - cmd.arg(s); - } else { - cmd.arg("-o").arg(&dst); - } - cmd.arg(if msvc {"/c"} else {"-c"}); - cmd.arg(file); - - run(&mut cmd, &name); - } - - /// Get the compiler that's in use for this configuration. - /// - /// This function will return a `Tool` which represents the culmination - /// of this configuration at a snapshot in time. The returned compiler can - /// be inspected (e.g. the path, arguments, environment) to forward along to - /// other tools, or the `to_command` method can be used to invoke the - /// compiler itself. 
- /// - /// This method will take into account all configuration such as debug - /// information, optimization level, include directories, defines, etc. - /// Additionally, the compiler binary in use follows the standard - /// conventions for this path, e.g. looking at the explicitly set compiler, - /// environment variables (a number of which are inspected here), and then - /// falling back to the default configuration. - pub fn get_compiler(&self) -> Tool { - let opt_level = self.get_opt_level(); - let debug = self.get_debug(); - let target = self.get_target(); - let msvc = target.contains("msvc"); - self.print(&format!("debug={} opt-level={}", debug, opt_level)); - - let mut cmd = self.get_base_compiler(); - let nvcc = cmd.path.to_str() - .map(|path| path.contains("nvcc")) - .unwrap_or(false); - - if msvc { - cmd.args.push("/nologo".into()); - let features = env::var("CARGO_CFG_TARGET_FEATURE") - .unwrap_or(String::new()); - if features.contains("crt-static") { - cmd.args.push("/MT".into()); - } else { - cmd.args.push("/MD".into()); - } - match &opt_level[..] 
{ - "z" | "s" => cmd.args.push("/Os".into()), - "2" => cmd.args.push("/O2".into()), - "1" => cmd.args.push("/O1".into()), - _ => {} - } - if target.contains("i586") { - cmd.args.push("/ARCH:IA32".into()); - } - } else if nvcc { - cmd.args.push(format!("-O{}", opt_level).into()); - } else { - cmd.args.push(format!("-O{}", opt_level).into()); - cmd.args.push("-ffunction-sections".into()); - cmd.args.push("-fdata-sections".into()); - } - for arg in self.envflags(if self.cpp {"CXXFLAGS"} else {"CFLAGS"}) { - cmd.args.push(arg.into()); - } - - if debug { - cmd.args.push(if msvc {"/Z7"} else {"-g"}.into()); - } - - if target.contains("-ios") { - self.ios_flags(&mut cmd); - } else if !msvc { - if target.contains("i686") || target.contains("i586") { - cmd.args.push("-m32".into()); - } else if target.contains("x86_64") || target.contains("powerpc64") { - cmd.args.push("-m64".into()); - } - - if !nvcc && self.pic.unwrap_or(!target.contains("i686") && !target.contains("windows-gnu")) { - cmd.args.push("-fPIC".into()); - } else if nvcc && self.pic.unwrap_or(false) { - cmd.args.push("-Xcompiler".into()); - cmd.args.push("\'-fPIC\'".into()); - } - if target.contains("musl") { - cmd.args.push("-static".into()); - } - - if target.starts_with("armv7-unknown-linux-") { - cmd.args.push("-march=armv7-a".into()); - } - if target.starts_with("armv7-linux-androideabi") { - cmd.args.push("-march=armv7-a".into()); - cmd.args.push("-mfpu=vfpv3-d16".into()); - } - if target.starts_with("arm-unknown-linux-") { - cmd.args.push("-march=armv6".into()); - cmd.args.push("-marm".into()); - } - if target.starts_with("i586-unknown-linux-") { - cmd.args.push("-march=pentium".into()); - } - if target.starts_with("i686-unknown-linux-") { - cmd.args.push("-march=i686".into()); - } - if target.starts_with("thumb") { - cmd.args.push("-mthumb".into()); - - if target.ends_with("eabihf") { - cmd.args.push("-mfloat-abi=hard".into()) - } - } - if target.starts_with("thumbv6m") { - 
cmd.args.push("-march=armv6s-m".into()); - } - if target.starts_with("thumbv7em") { - cmd.args.push("-march=armv7e-m".into()); - - if target.ends_with("eabihf") { - cmd.args.push("-mfpu=fpv4-sp-d16".into()) - } - } - if target.starts_with("thumbv7m") { - cmd.args.push("-march=armv7-m".into()); - } - } - - if self.cpp && !msvc { - if let Some(ref stdlib) = self.cpp_set_stdlib { - cmd.args.push(format!("-stdlib=lib{}", stdlib).into()); - } - } - - for directory in self.include_directories.iter() { - cmd.args.push(if msvc {"/I"} else {"-I"}.into()); - cmd.args.push(directory.into()); - } - - for flag in self.flags.iter() { - cmd.args.push(flag.into()); - } - - for &(ref key, ref value) in self.definitions.iter() { - let lead = if msvc {"/"} else {"-"}; - if let &Some(ref value) = value { - cmd.args.push(format!("{}D{}={}", lead, key, value).into()); - } else { - cmd.args.push(format!("{}D{}", lead, key).into()); - } - } - cmd - } - - fn msvc_macro_assembler(&self) -> (Command, String) { - let target = self.get_target(); - let tool = if target.contains("x86_64") {"ml64.exe"} else {"ml.exe"}; - let mut cmd = windows_registry::find(&target, tool).unwrap_or_else(|| { - self.cmd(tool) - }); - for directory in self.include_directories.iter() { - cmd.arg("/I").arg(directory); - } - for &(ref key, ref value) in self.definitions.iter() { - if let &Some(ref value) = value { - cmd.arg(&format!("/D{}={}", key, value)); - } else { - cmd.arg(&format!("/D{}", key)); - } - } - - if target.contains("i686") || target.contains("i586") { - cmd.arg("/safeseh"); - } - for flag in self.flags.iter() { - cmd.arg(flag); - } - - (cmd, tool.to_string()) - } - - fn assemble(&self, lib_name: &str, dst: &Path, objects: &[PathBuf]) { - // Delete the destination if it exists as the `ar` tool at least on Unix - // appends to it, which we don't want. 
- let _ = fs::remove_file(&dst); - - let target = self.get_target(); - if target.contains("msvc") { - let mut cmd = match self.archiver { - Some(ref s) => self.cmd(s), - None => windows_registry::find(&target, "lib.exe") - .unwrap_or(self.cmd("lib.exe")), - }; - let mut out = OsString::from("/OUT:"); - out.push(dst); - run(cmd.arg(out).arg("/nologo") - .args(objects) - .args(&self.objects), "lib.exe"); - - // The Rust compiler will look for libfoo.a and foo.lib, but the - // MSVC linker will also be passed foo.lib, so be sure that both - // exist for now. - let lib_dst = dst.with_file_name(format!("{}.lib", lib_name)); - let _ = fs::remove_file(&lib_dst); - fs::hard_link(&dst, &lib_dst).or_else(|_| { - //if hard-link fails, just copy (ignoring the number of bytes written) - fs::copy(&dst, &lib_dst).map(|_| ()) - }).ok().expect("Copying from {:?} to {:?} failed.");; - } else { - let ar = self.get_ar(); - let cmd = ar.file_name().unwrap().to_string_lossy(); - run(self.cmd(&ar).arg("crs") - .arg(dst) - .args(objects) - .args(&self.objects), &cmd); - } - } - - fn ios_flags(&self, cmd: &mut Tool) { - enum ArchSpec { - Device(&'static str), - Simulator(&'static str), - } - - let target = self.get_target(); - let arch = target.split('-').nth(0).unwrap(); - let arch = match arch { - "arm" | "armv7" | "thumbv7" => ArchSpec::Device("armv7"), - "armv7s" | "thumbv7s" => ArchSpec::Device("armv7s"), - "arm64" | "aarch64" => ArchSpec::Device("arm64"), - "i386" | "i686" => ArchSpec::Simulator("-m32"), - "x86_64" => ArchSpec::Simulator("-m64"), - _ => fail("Unknown arch for iOS target") - }; - - let sdk = match arch { - ArchSpec::Device(arch) => { - cmd.args.push("-arch".into()); - cmd.args.push(arch.into()); - cmd.args.push("-miphoneos-version-min=7.0".into()); - "iphoneos" - }, - ArchSpec::Simulator(arch) => { - cmd.args.push(arch.into()); - cmd.args.push("-mios-simulator-version-min=7.0".into()); - "iphonesimulator" - } - }; - - self.print(&format!("Detecting iOS SDK path for 
{}", sdk)); - let sdk_path = self.cmd("xcrun") - .arg("--show-sdk-path") - .arg("--sdk") - .arg(sdk) - .stderr(Stdio::inherit()) - .output() - .unwrap() - .stdout; - - let sdk_path = String::from_utf8(sdk_path).unwrap(); - - cmd.args.push("-isysroot".into()); - cmd.args.push(sdk_path.trim().into()); - } - - fn cmd>(&self, prog: P) -> Command { - let mut cmd = Command::new(prog); - for &(ref a, ref b) in self.env.iter() { - cmd.env(a, b); - } - return cmd - } - - fn get_base_compiler(&self) -> Tool { - if let Some(ref c) = self.compiler { - return Tool::new(c.clone()) - } - let host = self.get_host(); - let target = self.get_target(); - let (env, msvc, gnu, default) = if self.cpp { - ("CXX", "cl.exe", "g++", "c++") - } else { - ("CC", "cl.exe", "gcc", "cc") - }; - self.env_tool(env).map(|(tool, args)| { - let mut t = Tool::new(PathBuf::from(tool)); - for arg in args { - t.args.push(arg.into()); - } - return t - }).or_else(|| { - if target.contains("emscripten") { - if self.cpp { - Some(Tool::new(PathBuf::from("em++"))) - } else { - Some(Tool::new(PathBuf::from("emcc"))) - } - } else { - None - } - }).or_else(|| { - windows_registry::find_tool(&target, "cl.exe") - }).unwrap_or_else(|| { - let compiler = if host.contains("windows") && - target.contains("windows") { - if target.contains("msvc") { - msvc.to_string() - } else { - format!("{}.exe", gnu) - } - } else if target.contains("android") { - format!("{}-{}", target, gnu) - } else if self.get_host() != target { - // CROSS_COMPILE is of the form: "arm-linux-gnueabi-" - let cc_env = self.getenv("CROSS_COMPILE"); - let cross_compile = cc_env.as_ref().map(|s| s.trim_right_matches('-')); - let prefix = cross_compile.or(match &target[..] 
{ - "aarch64-unknown-linux-gnu" => Some("aarch64-linux-gnu"), - "arm-unknown-linux-gnueabi" => Some("arm-linux-gnueabi"), - "arm-unknown-linux-gnueabihf" => Some("arm-linux-gnueabihf"), - "arm-unknown-linux-musleabi" => Some("arm-linux-musleabi"), - "arm-unknown-linux-musleabihf" => Some("arm-linux-musleabihf"), - "arm-unknown-netbsdelf-eabi" => Some("arm--netbsdelf-eabi"), - "armv6-unknown-netbsdelf-eabihf" => Some("armv6--netbsdelf-eabihf"), - "armv7-unknown-linux-gnueabihf" => Some("arm-linux-gnueabihf"), - "armv7-unknown-linux-musleabihf" => Some("arm-linux-musleabihf"), - "armv7-unknown-netbsdelf-eabihf" => Some("armv7--netbsdelf-eabihf"), - "i686-pc-windows-gnu" => Some("i686-w64-mingw32"), - "i686-unknown-linux-musl" => Some("musl"), - "i686-unknown-netbsdelf" => Some("i486--netbsdelf"), - "mips-unknown-linux-gnu" => Some("mips-linux-gnu"), - "mipsel-unknown-linux-gnu" => Some("mipsel-linux-gnu"), - "mips64-unknown-linux-gnuabi64" => Some("mips64-linux-gnuabi64"), - "mips64el-unknown-linux-gnuabi64" => Some("mips64el-linux-gnuabi64"), - "powerpc-unknown-linux-gnu" => Some("powerpc-linux-gnu"), - "powerpc-unknown-netbsd" => Some("powerpc--netbsd"), - "powerpc64-unknown-linux-gnu" => Some("powerpc-linux-gnu"), - "powerpc64le-unknown-linux-gnu" => Some("powerpc64le-linux-gnu"), - "s390x-unknown-linux-gnu" => Some("s390x-linux-gnu"), - "thumbv6m-none-eabi" => Some("arm-none-eabi"), - "thumbv7em-none-eabi" => Some("arm-none-eabi"), - "thumbv7em-none-eabihf" => Some("arm-none-eabi"), - "thumbv7m-none-eabi" => Some("arm-none-eabi"), - "x86_64-pc-windows-gnu" => Some("x86_64-w64-mingw32"), - "x86_64-rumprun-netbsd" => Some("x86_64-rumprun-netbsd"), - "x86_64-unknown-linux-musl" => Some("musl"), - "x86_64-unknown-netbsd" => Some("x86_64--netbsd"), - _ => None, - }); - match prefix { - Some(prefix) => format!("{}-{}", prefix, gnu), - None => default.to_string(), - } - } else { - default.to_string() - }; - Tool::new(PathBuf::from(compiler)) - }) - } - - fn 
get_var(&self, var_base: &str) -> Result { - let target = self.get_target(); - let host = self.get_host(); - let kind = if host == target {"HOST"} else {"TARGET"}; - let target_u = target.replace("-", "_"); - let res = self.getenv(&format!("{}_{}", var_base, target)) - .or_else(|| self.getenv(&format!("{}_{}", var_base, target_u))) - .or_else(|| self.getenv(&format!("{}_{}", kind, var_base))) - .or_else(|| self.getenv(var_base)); - - match res { - Some(res) => Ok(res), - None => Err("could not get environment variable".to_string()), - } - } - - fn envflags(&self, name: &str) -> Vec { - self.get_var(name).unwrap_or(String::new()) - .split(|c: char| c.is_whitespace()).filter(|s| !s.is_empty()) - .map(|s| s.to_string()) - .collect() - } - - fn env_tool(&self, name: &str) -> Option<(String, Vec)> { - self.get_var(name).ok().map(|tool| { - let whitelist = ["ccache", "distcc"]; - for t in whitelist.iter() { - if tool.starts_with(t) && tool[t.len()..].starts_with(" ") { - return (t.to_string(), - vec![tool[t.len()..].trim_left().to_string()]) - } - } - (tool, Vec::new()) - }) - } - - /// Returns the default C++ standard library for the current target: `libc++` - /// for OS X and `libstdc++` for anything else. 
- fn get_cpp_link_stdlib(&self) -> Option { - self.cpp_link_stdlib.clone().unwrap_or_else(|| { - let target = self.get_target(); - if target.contains("msvc") { - None - } else if target.contains("darwin") { - Some("c++".to_string()) - } else if target.contains("freebsd") { - Some("c++".to_string()) - } else { - Some("stdc++".to_string()) - } - }) - } - - fn get_ar(&self) -> PathBuf { - self.archiver.clone().or_else(|| { - self.get_var("AR").map(PathBuf::from).ok() - }).unwrap_or_else(|| { - if self.get_target().contains("android") { - PathBuf::from(format!("{}-ar", self.get_target())) - } else if self.get_target().contains("emscripten") { - PathBuf::from("emar") - } else { - PathBuf::from("ar") - } - }) - } - - fn get_target(&self) -> String { - self.target.clone().unwrap_or_else(|| self.getenv_unwrap("TARGET")) - } - - fn get_host(&self) -> String { - self.host.clone().unwrap_or_else(|| self.getenv_unwrap("HOST")) - } - - fn get_opt_level(&self) -> String { - self.opt_level.as_ref().cloned().unwrap_or_else(|| { - self.getenv_unwrap("OPT_LEVEL") - }) - } - - fn get_debug(&self) -> bool { - self.debug.unwrap_or_else(|| self.getenv_unwrap("PROFILE") == "debug") - } - - fn get_out_dir(&self) -> PathBuf { - self.out_dir.clone().unwrap_or_else(|| { - env::var_os("OUT_DIR").map(PathBuf::from).unwrap() - }) - } - - fn getenv(&self, v: &str) -> Option { - let r = env::var(v).ok(); - self.print(&format!("{} = {:?}", v, r)); - r - } - - fn getenv_unwrap(&self, v: &str) -> String { - match self.getenv(v) { - Some(s) => s, - None => fail(&format!("environment variable `{}` not defined", v)), - } - } - - fn print(&self, s: &str) { - if self.cargo_metadata { - println!("{}", s); - } - } -} - -impl Tool { - fn new(path: PathBuf) -> Tool { - Tool { - path: path, - args: Vec::new(), - env: Vec::new(), - } - } - - /// Converts this compiler into a `Command` that's ready to be run. 
- /// - /// This is useful for when the compiler needs to be executed and the - /// command returned will already have the initial arguments and environment - /// variables configured. - pub fn to_command(&self) -> Command { - let mut cmd = Command::new(&self.path); - cmd.args(&self.args); - for &(ref k, ref v) in self.env.iter() { - cmd.env(k, v); - } - return cmd - } - - /// Returns the path for this compiler. - /// - /// Note that this may not be a path to a file on the filesystem, e.g. "cc", - /// but rather something which will be resolved when a process is spawned. - pub fn path(&self) -> &Path { - &self.path - } - - /// Returns the default set of arguments to the compiler needed to produce - /// executables for the target this compiler generates. - pub fn args(&self) -> &[OsString] { - &self.args - } - - /// Returns the set of environment variables needed for this compiler to - /// operate. - /// - /// This is typically only used for MSVC compilers currently. - pub fn env(&self) -> &[(OsString, OsString)] { - &self.env - } -} - -fn run(cmd: &mut Command, program: &str) { - println!("running: {:?}", cmd); - // Capture the standard error coming from these programs, and write it out - // with cargo:warning= prefixes. Note that this is a bit wonky to avoid - // requiring the output to be UTF-8, we instead just ship bytes from one - // location to another. 
- let spawn_result = match cmd.stderr(Stdio::piped()).spawn() { - Ok(mut child) => { - let stderr = BufReader::new(child.stderr.take().unwrap()); - for line in stderr.split(b'\n').filter_map(|l| l.ok()) { - print!("cargo:warning="); - std::io::stdout().write_all(&line).unwrap(); - println!(""); - } - child.wait() - } - Err(e) => Err(e), - }; - let status = match spawn_result { - Ok(status) => status, - Err(ref e) if e.kind() == io::ErrorKind::NotFound => { - let extra = if cfg!(windows) { - " (see https://github.com/alexcrichton/gcc-rs#compile-time-requirements \ - for help)" - } else { - "" - }; - fail(&format!("failed to execute command: {}\nIs `{}` \ - not installed?{}", e, program, extra)); - } - Err(e) => fail(&format!("failed to execute command: {}", e)), - }; - println!("{:?}", status); - if !status.success() { - fail(&format!("command did not execute successfully, got: {}", status)); - } -} - -fn fail(s: &str) -> ! { - println!("\n\n{}\n\n", s); - panic!() -} diff -Nru cargo-0.17.0/vendor/gcc-0.3.39/src/registry.rs cargo-0.19.0/vendor/gcc-0.3.39/src/registry.rs --- cargo-0.17.0/vendor/gcc-0.3.39/src/registry.rs 2017-03-24 16:59:54.000000000 +0000 +++ cargo-0.19.0/vendor/gcc-0.3.39/src/registry.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,169 +0,0 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
- -use std::ffi::{OsString, OsStr}; -use std::io; -use std::ops::RangeFrom; -use std::os::raw; -use std::os::windows::prelude::*; - -pub struct RegistryKey(Repr); - -type HKEY = *mut u8; -type DWORD = u32; -type LPDWORD = *mut DWORD; -type LPCWSTR = *const u16; -type LPWSTR = *mut u16; -type LONG = raw::c_long; -type PHKEY = *mut HKEY; -type PFILETIME = *mut u8; -type LPBYTE = *mut u8; -type REGSAM = u32; - -const ERROR_SUCCESS: DWORD = 0; -const ERROR_NO_MORE_ITEMS: DWORD = 259; -const HKEY_LOCAL_MACHINE: HKEY = 0x80000002 as HKEY; -const REG_SZ: DWORD = 1; -const KEY_READ: DWORD = 0x20019; -const KEY_WOW64_32KEY: DWORD = 0x200; - -#[link(name = "advapi32")] -extern "system" { - fn RegOpenKeyExW(key: HKEY, - lpSubKey: LPCWSTR, - ulOptions: DWORD, - samDesired: REGSAM, - phkResult: PHKEY) -> LONG; - fn RegEnumKeyExW(key: HKEY, - dwIndex: DWORD, - lpName: LPWSTR, - lpcName: LPDWORD, - lpReserved: LPDWORD, - lpClass: LPWSTR, - lpcClass: LPDWORD, - lpftLastWriteTime: PFILETIME) -> LONG; - fn RegQueryValueExW(hKey: HKEY, - lpValueName: LPCWSTR, - lpReserved: LPDWORD, - lpType: LPDWORD, - lpData: LPBYTE, - lpcbData: LPDWORD) -> LONG; - fn RegCloseKey(hKey: HKEY) -> LONG; -} - -struct OwnedKey(HKEY); - -enum Repr { - Const(HKEY), - Owned(OwnedKey), -} - -pub struct Iter<'a> { - idx: RangeFrom, - key: &'a RegistryKey, -} - -unsafe impl Sync for Repr {} -unsafe impl Send for Repr {} - -pub static LOCAL_MACHINE: RegistryKey = - RegistryKey(Repr::Const(HKEY_LOCAL_MACHINE)); - -impl RegistryKey { - fn raw(&self) -> HKEY { - match self.0 { - Repr::Const(val) => val, - Repr::Owned(ref val) => val.0, - } - } - - pub fn open(&self, key: &OsStr) -> io::Result { - let key = key.encode_wide().chain(Some(0)).collect::>(); - let mut ret = 0 as *mut _; - let err = unsafe { - RegOpenKeyExW(self.raw(), key.as_ptr(), 0, - KEY_READ | KEY_WOW64_32KEY, &mut ret) - }; - if err == ERROR_SUCCESS as LONG { - Ok(RegistryKey(Repr::Owned(OwnedKey(ret)))) - } else { - 
Err(io::Error::from_raw_os_error(err as i32)) - } - } - - pub fn iter(&self) -> Iter { - Iter { idx: 0.., key: self } - } - - pub fn query_str(&self, name: &str) -> io::Result { - let name: &OsStr = name.as_ref(); - let name = name.encode_wide().chain(Some(0)).collect::>(); - let mut len = 0; - let mut kind = 0; - unsafe { - let err = RegQueryValueExW(self.raw(), name.as_ptr(), 0 as *mut _, - &mut kind, 0 as *mut _, &mut len); - if err != ERROR_SUCCESS as LONG { - return Err(io::Error::from_raw_os_error(err as i32)) - } - if kind != REG_SZ { - return Err(io::Error::new(io::ErrorKind::Other, - "registry key wasn't a string")) - } - - // The length here is the length in bytes, but we're using wide - // characters so we need to be sure to halve it for the capacity - // passed in. - let mut v = Vec::with_capacity(len as usize / 2); - let err = RegQueryValueExW(self.raw(), name.as_ptr(), 0 as *mut _, - 0 as *mut _, v.as_mut_ptr() as *mut _, - &mut len); - if err != ERROR_SUCCESS as LONG { - return Err(io::Error::from_raw_os_error(err as i32)) - } - v.set_len(len as usize / 2); - - // Some registry keys may have a terminating nul character, but - // we're not interested in that, so chop it off if it's there. 
- if v[v.len() - 1] == 0 { - v.pop(); - } - Ok(OsString::from_wide(&v)) - } - } -} - -impl Drop for OwnedKey { - fn drop(&mut self) { - unsafe { RegCloseKey(self.0); } - } -} - -impl<'a> Iterator for Iter<'a> { - type Item = io::Result; - - fn next(&mut self) -> Option> { - self.idx.next().and_then(|i| unsafe { - let mut v = Vec::with_capacity(256); - let mut len = v.capacity() as DWORD; - let ret = RegEnumKeyExW(self.key.raw(), i, v.as_mut_ptr(), &mut len, - 0 as *mut _, 0 as *mut _, 0 as *mut _, - 0 as *mut _); - if ret == ERROR_NO_MORE_ITEMS as LONG { - None - } else if ret != ERROR_SUCCESS as LONG { - Some(Err(io::Error::from_raw_os_error(ret as i32))) - } else { - v.set_len(len as usize); - Some(Ok(OsString::from_wide(&v))) - } - }) - } -} diff -Nru cargo-0.17.0/vendor/gcc-0.3.39/src/windows_registry.rs cargo-0.19.0/vendor/gcc-0.3.39/src/windows_registry.rs --- cargo-0.17.0/vendor/gcc-0.3.39/src/windows_registry.rs 2017-03-24 16:59:54.000000000 +0000 +++ cargo-0.19.0/vendor/gcc-0.3.39/src/windows_registry.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,430 +0,0 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -//! A helper module to probe the Windows Registry when looking for -//! windows-specific tools. - -use std::process::Command; - -use Tool; - -macro_rules! otry { - ($expr:expr) => (match $expr { - Some(val) => val, - None => return None, - }) -} - -/// Attempts to find a tool within an MSVC installation using the Windows -/// registry as a point to search from. -/// -/// The `target` argument is the target that the tool should work for (e.g. -/// compile or link for) and the `tool` argument is the tool to find (e.g. -/// `cl.exe` or `link.exe`). 
-/// -/// This function will return `None` if the tool could not be found, or it will -/// return `Some(cmd)` which represents a command that's ready to execute the -/// tool with the appropriate environment variables set. -/// -/// Note that this function always returns `None` for non-MSVC targets. -pub fn find(target: &str, tool: &str) -> Option { - find_tool(target, tool).map(|c| c.to_command()) -} - -/// Similar to the `find` function above, this function will attempt the same -/// operation (finding a MSVC tool in a local install) but instead returns a -/// `Tool` which may be introspected. -#[cfg(not(windows))] -pub fn find_tool(_target: &str, _tool: &str) -> Option { - None -} - -/// Documented above. -#[cfg(windows)] -pub fn find_tool(target: &str, tool: &str) -> Option { - use std::env; - use std::ffi::OsString; - use std::mem; - use std::path::{Path, PathBuf}; - use registry::{RegistryKey, LOCAL_MACHINE}; - - struct MsvcTool { - tool: PathBuf, - libs: Vec, - path: Vec, - include: Vec, - } - - impl MsvcTool { - fn new(tool: PathBuf) -> MsvcTool { - MsvcTool { - tool: tool, - libs: Vec::new(), - path: Vec::new(), - include: Vec::new(), - } - } - - fn into_tool(self) -> Tool { - let MsvcTool { tool, libs, path, include } = self; - let mut tool = Tool::new(tool.into()); - add_env(&mut tool, "LIB", libs); - add_env(&mut tool, "PATH", path); - add_env(&mut tool, "INCLUDE", include); - return tool - } - } - - // This logic is all tailored for MSVC, if we're not that then bail out - // early. - if !target.contains("msvc") { - return None - } - - // Looks like msbuild isn't located in the same location as other tools like - // cl.exe and lib.exe. To handle this we probe for it manually with - // dedicated registry keys. - if tool.contains("msbuild") { - return find_msbuild(target) - } - - // If VCINSTALLDIR is set, then someone's probably already run vcvars and we - // should just find whatever that indicates. 
- if env::var_os("VCINSTALLDIR").is_some() { - return env::var_os("PATH").and_then(|path| { - env::split_paths(&path).map(|p| p.join(tool)).find(|p| p.exists()) - }).map(|path| { - Tool::new(path.into()) - }) - } - - // Ok, if we're here, now comes the fun part of the probing. Default shells - // or shells like MSYS aren't really configured to execute `cl.exe` and the - // various compiler tools shipped as part of Visual Studio. Here we try to - // first find the relevant tool, then we also have to be sure to fill in - // environment variables like `LIB`, `INCLUDE`, and `PATH` to ensure that - // the tool is actually usable. - - return find_msvc_latest(tool, target, "15.0").or_else(|| { - find_msvc_latest(tool, target, "14.0") - }).or_else(|| { - find_msvc_12(tool, target) - }).or_else(|| { - find_msvc_11(tool, target) - }); - - // For MSVC 14 or newer we need to find the Universal CRT as well as either - // the Windows 10 SDK or Windows 8.1 SDK. - fn find_msvc_latest(tool: &str, target: &str, ver: &str) -> Option { - let vcdir = otry!(get_vc_dir(ver)); - let mut tool = otry!(get_tool(tool, &vcdir, target)); - let sub = otry!(lib_subdir(target)); - let (ucrt, ucrt_version) = otry!(get_ucrt_dir()); - - let ucrt_include = ucrt.join("include").join(&ucrt_version); - tool.include.push(ucrt_include.join("ucrt")); - - let ucrt_lib = ucrt.join("lib").join(&ucrt_version); - tool.libs.push(ucrt_lib.join("ucrt").join(sub)); - - if let Some((sdk, version)) = get_sdk10_dir() { - tool.path.push(sdk.join("bin").join(sub)); - let sdk_lib = sdk.join("lib").join(&version); - tool.libs.push(sdk_lib.join("um").join(sub)); - let sdk_include = sdk.join("include").join(&version); - tool.include.push(sdk_include.join("um")); - tool.include.push(sdk_include.join("winrt")); - tool.include.push(sdk_include.join("shared")); - } else if let Some(sdk) = get_sdk81_dir() { - tool.path.push(sdk.join("bin").join(sub)); - let sdk_lib = sdk.join("lib").join("winv6.3"); - 
tool.libs.push(sdk_lib.join("um").join(sub)); - let sdk_include = sdk.join("include"); - tool.include.push(sdk_include.join("um")); - tool.include.push(sdk_include.join("winrt")); - tool.include.push(sdk_include.join("shared")); - } else { - return None - } - Some(tool.into_tool()) - } - - // For MSVC 12 we need to find the Windows 8.1 SDK. - fn find_msvc_12(tool: &str, target: &str) -> Option { - let vcdir = otry!(get_vc_dir("12.0")); - let mut tool = otry!(get_tool(tool, &vcdir, target)); - let sub = otry!(lib_subdir(target)); - let sdk81 = otry!(get_sdk81_dir()); - tool.path.push(sdk81.join("bin").join(sub)); - let sdk_lib = sdk81.join("lib").join("winv6.3"); - tool.libs.push(sdk_lib.join("um").join(sub)); - let sdk_include = sdk81.join("include"); - tool.include.push(sdk_include.join("shared")); - tool.include.push(sdk_include.join("um")); - tool.include.push(sdk_include.join("winrt")); - Some(tool.into_tool()) - } - - // For MSVC 11 we need to find the Windows 8 SDK. - fn find_msvc_11(tool: &str, target: &str) -> Option { - let vcdir = otry!(get_vc_dir("11.0")); - let mut tool = otry!(get_tool(tool, &vcdir, target)); - let sub = otry!(lib_subdir(target)); - let sdk8 = otry!(get_sdk8_dir()); - tool.path.push(sdk8.join("bin").join(sub)); - let sdk_lib = sdk8.join("lib").join("win8"); - tool.libs.push(sdk_lib.join("um").join(sub)); - let sdk_include = sdk8.join("include"); - tool.include.push(sdk_include.join("shared")); - tool.include.push(sdk_include.join("um")); - tool.include.push(sdk_include.join("winrt")); - Some(tool.into_tool()) - } - - fn add_env(tool: &mut Tool, env: &str, paths: Vec) { - let prev = env::var_os(env).unwrap_or(OsString::new()); - let prev = env::split_paths(&prev); - let new = paths.into_iter().chain(prev); - tool.env.push((env.to_string().into(), env::join_paths(new).unwrap())); - } - - // Given a possible MSVC installation directory, we look for the linker and - // then add the MSVC library path. 
- fn get_tool(tool: &str, path: &Path, target: &str) -> Option { - bin_subdir(target).into_iter().map(|(sub, host)| { - (path.join("bin").join(sub).join(tool), - path.join("bin").join(host)) - }).filter(|&(ref path, _)| { - path.is_file() - }).map(|(path, host)| { - let mut tool = MsvcTool::new(path); - tool.path.push(host); - tool - }).filter_map(|mut tool| { - let sub = otry!(vc_lib_subdir(target)); - tool.libs.push(path.join("lib").join(sub)); - tool.include.push(path.join("include")); - let atlmfc_path = path.join("atlmfc"); - if atlmfc_path.exists() { - tool.libs.push(atlmfc_path.join("lib").join(sub)); - tool.include.push(atlmfc_path.join("include")); - } - Some(tool) - }).next() - } - - // To find MSVC we look in a specific registry key for the version we are - // trying to find. - fn get_vc_dir(ver: &str) -> Option { - let key = r"SOFTWARE\Microsoft\VisualStudio\SxS\VC7"; - let key = otry!(LOCAL_MACHINE.open(key.as_ref()).ok()); - let path = otry!(key.query_str(ver).ok()); - Some(path.into()) - } - - // To find the Universal CRT we look in a specific registry key for where - // all the Universal CRTs are located and then sort them asciibetically to - // find the newest version. While this sort of sorting isn't ideal, it is - // what vcvars does so that's good enough for us. 
- // - // Returns a pair of (root, version) for the ucrt dir if found - fn get_ucrt_dir() -> Option<(PathBuf, String)> { - let key = r"SOFTWARE\Microsoft\Windows Kits\Installed Roots"; - let key = otry!(LOCAL_MACHINE.open(key.as_ref()).ok()); - let root = otry!(key.query_str("KitsRoot10").ok()); - let readdir = otry!(Path::new(&root).join("lib").read_dir().ok()); - let max_libdir = otry!(readdir.filter_map(|dir| { - dir.ok() - }).map(|dir| { - dir.path() - }).filter(|dir| { - dir.components().last().and_then(|c| { - c.as_os_str().to_str() - }).map(|c| { - c.starts_with("10.") && dir.join("ucrt").is_dir() - }).unwrap_or(false) - }).max()); - let version = max_libdir.components().last().unwrap(); - let version = version.as_os_str().to_str().unwrap().to_string(); - Some((root.into(), version)) - } - - // Vcvars finds the correct version of the Windows 10 SDK by looking - // for the include `um\Windows.h` because sometimes a given version will - // only have UCRT bits without the rest of the SDK. Since we only care about - // libraries and not includes, we instead look for `um\x64\kernel32.lib`. - // Since the 32-bit and 64-bit libraries are always installed together we - // only need to bother checking x64, making this code a tiny bit simpler. - // Like we do for the Universal CRT, we sort the possibilities - // asciibetically to find the newest one as that is what vcvars does. 
- fn get_sdk10_dir() -> Option<(PathBuf, String)> { - let key = r"SOFTWARE\Microsoft\Microsoft SDKs\Windows\v10.0"; - let key = otry!(LOCAL_MACHINE.open(key.as_ref()).ok()); - let root = otry!(key.query_str("InstallationFolder").ok()); - let readdir = otry!(Path::new(&root).join("lib").read_dir().ok()); - let mut dirs = readdir.filter_map(|dir| dir.ok()) - .map(|dir| dir.path()) - .collect::>(); - dirs.sort(); - let dir = otry!(dirs.into_iter().rev().filter(|dir| { - dir.join("um").join("x64").join("kernel32.lib").is_file() - }).next()); - let version = dir.components().last().unwrap(); - let version = version.as_os_str().to_str().unwrap().to_string(); - Some((root.into(), version)) - } - - // Interestingly there are several subdirectories, `win7` `win8` and - // `winv6.3`. Vcvars seems to only care about `winv6.3` though, so the same - // applies to us. Note that if we were targetting kernel mode drivers - // instead of user mode applications, we would care. - fn get_sdk81_dir() -> Option { - let key = r"SOFTWARE\Microsoft\Microsoft SDKs\Windows\v8.1"; - let key = otry!(LOCAL_MACHINE.open(key.as_ref()).ok()); - let root = otry!(key.query_str("InstallationFolder").ok()); - Some(root.into()) - } - - fn get_sdk8_dir() -> Option { - let key = r"SOFTWARE\Microsoft\Microsoft SDKs\Windows\v8.0"; - let key = otry!(LOCAL_MACHINE.open(key.as_ref()).ok()); - let root = otry!(key.query_str("InstallationFolder").ok()); - Some(root.into()) - } - - const PROCESSOR_ARCHITECTURE_INTEL: u16 = 0; - const PROCESSOR_ARCHITECTURE_AMD64: u16 = 9; - const X86: u16 = PROCESSOR_ARCHITECTURE_INTEL; - const X86_64: u16 = PROCESSOR_ARCHITECTURE_AMD64; - - // When choosing the tool to use, we have to choose the one which matches - // the target architecture. Otherwise we end up in situations where someone - // on 32-bit Windows is trying to cross compile to 64-bit and it tries to - // invoke the native 64-bit compiler which won't work. 
- // - // For the return value of this function, the first member of the tuple is - // the folder of the tool we will be invoking, while the second member is - // the folder of the host toolchain for that tool which is essential when - // using a cross linker. We return a Vec since on x64 there are often two - // linkers that can target the architecture we desire. The 64-bit host - // linker is preferred, and hence first, due to 64-bit allowing it more - // address space to work with and potentially being faster. - fn bin_subdir(target: &str) -> Vec<(&'static str, &'static str)> { - let arch = target.split('-').next().unwrap(); - match (arch, host_arch()) { - ("i586", X86) | - ("i686", X86) => vec![("", "")], - ("i586", X86_64) | - ("i686", X86_64) => vec![("amd64_x86", "amd64"), ("", "")], - ("x86_64", X86) => vec![("x86_amd64", "")], - ("x86_64", X86_64) => vec![("amd64", "amd64"), ("x86_amd64", "")], - ("arm", X86) => vec![("x86_arm", "")], - ("arm", X86_64) => vec![("amd64_arm", "amd64"), ("x86_arm", "")], - _ => vec![], - } - } - - fn lib_subdir(target: &str) -> Option<&'static str> { - let arch = target.split('-').next().unwrap(); - match arch { - "i586" | "i686" => Some("x86"), - "x86_64" => Some("x64"), - "arm" => Some("arm"), - _ => None, - } - } - - // MSVC's x86 libraries are not in a subfolder - fn vc_lib_subdir(target: &str) -> Option<&'static str> { - let arch = target.split('-').next().unwrap(); - match arch { - "i586" | "i686" => Some(""), - "x86_64" => Some("amd64"), - "arm" => Some("arm"), - _ => None, - } - } - - #[allow(bad_style)] - fn host_arch() -> u16 { - type DWORD = u32; - type WORD = u16; - type LPVOID = *mut u8; - type DWORD_PTR = usize; - - #[repr(C)] - struct SYSTEM_INFO { - wProcessorArchitecture: WORD, - _wReserved: WORD, - _dwPageSize: DWORD, - _lpMinimumApplicationAddress: LPVOID, - _lpMaximumApplicationAddress: LPVOID, - _dwActiveProcessorMask: DWORD_PTR, - _dwNumberOfProcessors: DWORD, - _dwProcessorType: DWORD, - 
_dwAllocationGranularity: DWORD, - _wProcessorLevel: WORD, - _wProcessorRevision: WORD, - } - - extern "system" { - fn GetNativeSystemInfo(lpSystemInfo: *mut SYSTEM_INFO); - } - - unsafe { - let mut info = mem::zeroed(); - GetNativeSystemInfo(&mut info); - info.wProcessorArchitecture - } - } - - // Given a registry key, look at all the sub keys and find the one which has - // the maximal numeric value. - // - // Returns the name of the maximal key as well as the opened maximal key. - fn max_version(key: &RegistryKey) -> Option<(OsString, RegistryKey)> { - let mut max_vers = 0; - let mut max_key = None; - for subkey in key.iter().filter_map(|k| k.ok()) { - let val = subkey.to_str().and_then(|s| { - s.trim_left_matches("v").replace(".", "").parse().ok() - }); - let val = match val { - Some(s) => s, - None => continue, - }; - if val > max_vers { - if let Ok(k) = key.open(&subkey) { - max_vers = val; - max_key = Some((subkey, k)); - } - } - } - return max_key - } - - // see http://stackoverflow.com/questions/328017/path-to-msbuild - fn find_msbuild(target: &str) -> Option { - let key = r"SOFTWARE\Microsoft\MSBuild\ToolsVersions"; - LOCAL_MACHINE.open(key.as_ref()).ok().and_then(|key| { - max_version(&key).and_then(|(_vers, key)| { - key.query_str("MSBuildToolsPath").ok() - }) - }).map(|path| { - let mut path = PathBuf::from(path); - path.push("MSBuild.exe"); - let mut tool = Tool::new(path); - if target.contains("x86_64") { - tool.env.push(("Platform".into(), "X64".into())); - } - tool - }) - } -} diff -Nru cargo-0.17.0/vendor/gcc-0.3.39/tests/cc_env.rs cargo-0.19.0/vendor/gcc-0.3.39/tests/cc_env.rs --- cargo-0.17.0/vendor/gcc-0.3.39/tests/cc_env.rs 2017-03-24 16:59:54.000000000 +0000 +++ cargo-0.19.0/vendor/gcc-0.3.39/tests/cc_env.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,49 +0,0 @@ -extern crate tempdir; -extern crate gcc; - -use std::env; - -mod support; -use support::Test; - -#[test] -fn main() { - ccache(); - distcc(); - ccache_spaces(); -} - -fn ccache() { - 
let test = Test::gnu(); - test.shim("ccache"); - - env::set_var("CC", "ccache lol-this-is-not-a-compiler foo"); - test.gcc().file("foo.c").compile("libfoo.a"); - - test.cmd(0) - .must_have("lol-this-is-not-a-compiler foo") - .must_have("foo.c") - .must_not_have("ccache"); -} - -fn ccache_spaces() { - let test = Test::gnu(); - test.shim("ccache"); - - env::set_var("CC", "ccache lol-this-is-not-a-compiler foo"); - test.gcc().file("foo.c").compile("libfoo.a"); - test.cmd(0).must_have("lol-this-is-not-a-compiler foo"); -} - -fn distcc() { - let test = Test::gnu(); - test.shim("distcc"); - - env::set_var("CC", "distcc lol-this-is-not-a-compiler foo"); - test.gcc().file("foo.c").compile("libfoo.a"); - - test.cmd(0) - .must_have("lol-this-is-not-a-compiler foo") - .must_have("foo.c") - .must_not_have("distcc"); -} diff -Nru cargo-0.17.0/vendor/gcc-0.3.39/tests/support/mod.rs cargo-0.19.0/vendor/gcc-0.3.39/tests/support/mod.rs --- cargo-0.17.0/vendor/gcc-0.3.39/tests/support/mod.rs 2017-03-24 16:59:54.000000000 +0000 +++ cargo-0.19.0/vendor/gcc-0.3.39/tests/support/mod.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,114 +0,0 @@ -#![allow(dead_code)] - -use std::env; -use std::ffi::OsStr; -use std::fs::{self, File}; -use std::io::prelude::*; -use std::path::PathBuf; - -use gcc; -use tempdir::TempDir; - -pub struct Test { - pub td: TempDir, - pub gcc: PathBuf, - pub msvc: bool, -} - -pub struct Execution { - args: Vec, -} - -impl Test { - pub fn new() -> Test { - let mut gcc = PathBuf::from(env::current_exe().unwrap()); - gcc.pop(); - if gcc.ends_with("deps") { - gcc.pop(); - } - gcc.push(format!("gcc-shim{}", env::consts::EXE_SUFFIX)); - Test { - td: TempDir::new("gcc-test").unwrap(), - gcc: gcc, - msvc: false, - } - } - - pub fn gnu() -> Test { - let t = Test::new(); - t.shim("cc").shim("ar"); - return t - } - - pub fn msvc() -> Test { - let mut t = Test::new(); - t.shim("cl").shim("lib.exe"); - t.msvc = true; - return t - } - - pub fn shim(&self, name: &str) -> &Test { - let 
fname = format!("{}{}", name, env::consts::EXE_SUFFIX); - fs::hard_link(&self.gcc, self.td.path().join(&fname)).or_else(|_| { - fs::copy(&self.gcc, self.td.path().join(&fname)).map(|_| ()) - }).unwrap(); - self - } - - pub fn gcc(&self) -> gcc::Config { - let mut cfg = gcc::Config::new(); - let mut path = env::split_paths(&env::var_os("PATH").unwrap()) - .collect::>(); - path.insert(0, self.td.path().to_owned()); - let target = if self.msvc { - "x86_64-pc-windows-msvc" - } else { - "x86_64-unknown-linux-gnu" - }; - - cfg.target(target).host(target) - .opt_level(2) - .debug(false) - .out_dir(self.td.path()) - .__set_env("PATH", env::join_paths(path).unwrap()) - .__set_env("GCCTEST_OUT_DIR", self.td.path()); - if self.msvc { - cfg.compiler(self.td.path().join("cl")); - cfg.archiver(self.td.path().join("lib.exe")); - } - return cfg - } - - pub fn cmd(&self, i: u32) -> Execution { - let mut s = String::new(); - File::open(self.td.path().join(format!("out{}", i))).unwrap() - .read_to_string(&mut s).unwrap(); - Execution { - args: s.lines().map(|s| s.to_string()).collect(), - } - } -} - -impl Execution { - pub fn must_have>(&self, p: P) -> &Execution { - if !self.has(p.as_ref()) { - panic!("didn't find {:?} in {:?}", p.as_ref(), self.args); - } else { - self - } - } - - pub fn must_not_have>(&self, p: P) -> &Execution { - if self.has(p.as_ref()) { - panic!("found {:?}", p.as_ref()); - } else { - self - } - } - - pub fn has(&self, p: &OsStr) -> bool { - self.args.iter().any(|arg| { - OsStr::new(arg) == p - }) - } -} diff -Nru cargo-0.17.0/vendor/gcc-0.3.39/tests/test.rs cargo-0.19.0/vendor/gcc-0.3.39/tests/test.rs --- cargo-0.17.0/vendor/gcc-0.3.39/tests/test.rs 2017-03-24 16:59:54.000000000 +0000 +++ cargo-0.19.0/vendor/gcc-0.3.39/tests/test.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,207 +0,0 @@ -extern crate gcc; -extern crate tempdir; - -use support::Test; - -mod support; - -#[test] -fn gnu_smoke() { - let test = Test::gnu(); - test.gcc() - 
.file("foo.c").compile("libfoo.a"); - - test.cmd(0).must_have("-O2") - .must_have("foo.c") - .must_not_have("-g") - .must_have("-c") - .must_have("-ffunction-sections") - .must_have("-fdata-sections"); - test.cmd(1).must_have(test.td.path().join("foo.o")); -} - -#[test] -fn gnu_opt_level_1() { - let test = Test::gnu(); - test.gcc() - .opt_level(1) - .file("foo.c").compile("libfoo.a"); - - test.cmd(0).must_have("-O1") - .must_not_have("-O2"); -} - -#[test] -fn gnu_opt_level_s() { - let test = Test::gnu(); - test.gcc() - .opt_level_str("s") - .file("foo.c").compile("libfoo.a"); - - test.cmd(0).must_have("-Os") - .must_not_have("-O1") - .must_not_have("-O2") - .must_not_have("-O3") - .must_not_have("-Oz"); -} - -#[test] -fn gnu_debug() { - let test = Test::gnu(); - test.gcc() - .debug(true) - .file("foo.c").compile("libfoo.a"); - test.cmd(0).must_have("-g"); -} - -#[test] -fn gnu_x86_64() { - for vendor in &["unknown-linux-gnu", "apple-darwin"] { - let target = format!("x86_64-{}", vendor); - let test = Test::gnu(); - test.gcc() - .target(&target) - .host(&target) - .file("foo.c").compile("libfoo.a"); - - test.cmd(0).must_have("-fPIC") - .must_have("-m64"); - } -} - -#[test] -fn gnu_x86_64_no_pic() { - for vendor in &["unknown-linux-gnu", "apple-darwin"] { - let target = format!("x86_64-{}", vendor); - let test = Test::gnu(); - test.gcc() - .pic(false) - .target(&target) - .host(&target) - .file("foo.c").compile("libfoo.a"); - - test.cmd(0).must_not_have("-fPIC"); - } -} - -#[test] -fn gnu_i686() { - for vendor in &["unknown-linux-gnu", "apple-darwin"] { - let target = format!("i686-{}", vendor); - let test = Test::gnu(); - test.gcc() - .target(&target) - .host(&target) - .file("foo.c").compile("libfoo.a"); - - test.cmd(0).must_not_have("-fPIC") - .must_have("-m32"); - } -} - -#[test] -fn gnu_i686_pic() { - for vendor in &["unknown-linux-gnu", "apple-darwin"] { - let target = format!("i686-{}", vendor); - let test = Test::gnu(); - test.gcc() - .pic(true) - 
.target(&target) - .host(&target) - .file("foo.c").compile("libfoo.a"); - - test.cmd(0).must_have("-fPIC"); - } -} - -#[test] -fn gnu_set_stdlib() { - let test = Test::gnu(); - test.gcc() - .cpp_set_stdlib(Some("foo")) - .file("foo.c").compile("libfoo.a"); - - test.cmd(0).must_not_have("-stdlib=foo"); -} - -#[test] -fn gnu_include() { - let test = Test::gnu(); - test.gcc() - .include("foo/bar") - .file("foo.c").compile("libfoo.a"); - - test.cmd(0).must_have("-I").must_have("foo/bar"); -} - -#[test] -fn gnu_define() { - let test = Test::gnu(); - test.gcc() - .define("FOO", Some("bar")) - .define("BAR", None) - .file("foo.c").compile("libfoo.a"); - - test.cmd(0).must_have("-DFOO=bar").must_have("-DBAR"); -} - -#[test] -fn gnu_compile_assembly() { - let test = Test::gnu(); - test.gcc() - .file("foo.S").compile("libfoo.a"); - test.cmd(0).must_have("foo.S"); -} - -#[test] -fn msvc_smoke() { - let test = Test::msvc(); - test.gcc() - .file("foo.c").compile("libfoo.a"); - - test.cmd(0).must_have("/O2") - .must_have("foo.c") - .must_not_have("/Z7") - .must_have("/c"); - test.cmd(1).must_have(test.td.path().join("foo.o")); -} - -#[test] -fn msvc_opt_level_0() { - let test = Test::msvc(); - test.gcc() - .opt_level(0) - .file("foo.c").compile("libfoo.a"); - - test.cmd(0).must_not_have("/O2"); -} - -#[test] -fn msvc_debug() { - let test = Test::msvc(); - test.gcc() - .debug(true) - .file("foo.c").compile("libfoo.a"); - test.cmd(0).must_have("/Z7"); -} - -#[test] -fn msvc_include() { - let test = Test::msvc(); - test.gcc() - .include("foo/bar") - .file("foo.c").compile("libfoo.a"); - - test.cmd(0).must_have("/I").must_have("foo/bar"); -} - -#[test] -fn msvc_define() { - let test = Test::msvc(); - test.gcc() - .define("FOO", Some("bar")) - .define("BAR", None) - .file("foo.c").compile("libfoo.a"); - - test.cmd(0).must_have("/DFOO=bar").must_have("/DBAR"); -} diff -Nru cargo-0.17.0/vendor/gcc-0.3.39/.travis.yml cargo-0.19.0/vendor/gcc-0.3.39/.travis.yml --- 
cargo-0.17.0/vendor/gcc-0.3.39/.travis.yml 2017-03-24 16:59:54.000000000 +0000 +++ cargo-0.19.0/vendor/gcc-0.3.39/.travis.yml 1970-01-01 00:00:00.000000000 +0000 @@ -1,42 +0,0 @@ -language: rust -rust: - - stable - - beta - - nightly -sudo: false -install: - - if [ "$TRAVIS_OS_NAME" = "linux" ]; then OS=unknown-linux-gnu; else OS=apple-darwin; fi - - export TARGET=$ARCH-$OS - - curl https://static.rust-lang.org/rustup.sh | - sh -s -- --add-target=$TARGET --disable-sudo -y --prefix=`rustc --print sysroot` -before_script: - - pip install 'travis-cargo<0.2' --user && export PATH=$HOME/.local/bin:$PATH -script: - - cargo build --verbose - - cargo test --verbose - - cargo test --verbose --features parallel - - cargo test --manifest-path gcc-test/Cargo.toml --target $TARGET - - cargo test --manifest-path gcc-test/Cargo.toml --target $TARGET --features parallel - - cargo test --manifest-path gcc-test/Cargo.toml --target $TARGET --release - - cargo doc - - cargo clean && cargo build - - rustdoc --test README.md -L target/debug -L target/debug/deps -after_success: - - travis-cargo --only nightly doc-upload -env: - global: - secure: "CBtqrudgE0PS8x3kTr44jKbC2D4nfnmdYVecooNm0qnER4B4TSvZpZSQoCgKK6k4BYQuOSyFTOwYx6M79w39ZMOgyCP9ytB+tyMWL0/+ZuUQL04yVg4M5vd3oJMkOaXbvG56ncgPyFrseY+FPDg+mXAzvJk/nily37YXjkQj2D0=" - - matrix: - - ARCH=x86_64 - - ARCH=i686 -notifications: - email: - on_success: never -os: - - linux - - osx -addons: - apt: - packages: - - g++-multilib diff -Nru cargo-0.17.0/vendor/gcc-0.3.45/appveyor.yml cargo-0.19.0/vendor/gcc-0.3.45/appveyor.yml --- cargo-0.17.0/vendor/gcc-0.3.45/appveyor.yml 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/gcc-0.3.45/appveyor.yml 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,35 @@ +environment: + matrix: + - TARGET: x86_64-pc-windows-msvc + ARCH: amd64 + VS: C:\Program Files (x86)\Microsoft Visual Studio 12.0\VC\vcvarsall.bat + - TARGET: x86_64-pc-windows-msvc + ARCH: amd64 + VS: C:\Program Files (x86)\Microsoft Visual 
Studio 14.0\VC\vcvarsall.bat + - TARGET: i686-pc-windows-msvc + ARCH: x86 + VS: C:\Program Files (x86)\Microsoft Visual Studio 12.0\VC\vcvarsall.bat + - TARGET: i686-pc-windows-msvc + ARCH: x86 + VS: C:\Program Files (x86)\Microsoft Visual Studio 14.0\VC\vcvarsall.bat + - TARGET: x86_64-pc-windows-gnu + MSYS_BITS: 64 + - TARGET: i686-pc-windows-gnu + MSYS_BITS: 32 +install: + - ps: Start-FileDownload "https://static.rust-lang.org/dist/rust-nightly-${env:TARGET}.exe" + - rust-nightly-%TARGET%.exe /VERYSILENT /NORESTART /DIR="C:\Program Files (x86)\Rust" + - if defined VS call "%VS%" %ARCH% + - set PATH=%PATH%;C:\Program Files (x86)\Rust\bin + - if defined MSYS_BITS set PATH=%PATH%;C:\msys64\mingw%MSYS_BITS%\bin + - rustc -V + - cargo -V + +build: false + +test_script: + - cargo test --target %TARGET% + - cargo test --features parallel --target %TARGET% + - cargo test --manifest-path gcc-test/Cargo.toml --target %TARGET% + - cargo test --manifest-path gcc-test/Cargo.toml --features parallel --target %TARGET% + - cargo test --manifest-path gcc-test/Cargo.toml --release --target %TARGET% diff -Nru cargo-0.17.0/vendor/gcc-0.3.45/.cargo-checksum.json cargo-0.19.0/vendor/gcc-0.3.45/.cargo-checksum.json --- cargo-0.17.0/vendor/gcc-0.3.45/.cargo-checksum.json 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/gcc-0.3.45/.cargo-checksum.json 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1 @@ 
+{"files":{".cargo-ok":"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",".gitignore":"4cc6445feac7e9a1f8f1e1c51cc3afd0cf7bb931e3c5a6f18c41258401652702",".travis.yml":"e68f9d10a8e367890cf734239c39952ee480cf0e8da9520b377df4a2b8ccc9e8","Cargo.toml":"c047908b8a8e94daf752dda9f5338b1e60885465298fd1340350945f667ea830","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"378f5840b258e2779c39418f3f2d7b2ba96f1c7917dd6be0713f88305dbda397","README.md":"b1a639560fd536f2c3ab708a8e1066b675edd4d018dfa4e5e18d0d7327d81c15","appveyor.yml":"46c77d913eaa45871296942c2cd96ef092c9dcaf19201cb5c500a5107faeb06f","src/bin/gcc-shim.rs":"d6be9137cb48b86891e7b263adbf492e1193ffe682db9ba4a88eb1079b874b58","src/lib.rs":"b7bba8c458d10af66d5f0b12e4d8e0dbfcf5047d8ee8902f842a66828d00282d","src/registry.rs":"3876ef9573e3bbc050aef41a684b9a510cc1a91b15ae874fe032cf4377b4d116","src/windows_registry.rs":"36c6a7f8322407faff2dcfd4789d0876d034885944bc0340ac7c1f7cbfc307f1","tests/cc_env.rs":"d92c5e3d3d43ac244e63b2cd2c93a521fcf124bf1ccf8d4c6bfa7f8333d88976","tests/support/mod.rs":"56bcfd1e2ff5ae8e581c71229444a3d96094bf689808808dd80e315bd6632083","tests/test.rs":"581d1dd40ccfd8a34314fde958a827472e9828e5121be447b43d4b0838476285"},"package":"40899336fb50db0c78710f53e87afc54d8c7266fb76262fecc78ca1a7f09deae"} \ No newline at end of file diff -Nru cargo-0.17.0/vendor/gcc-0.3.45/Cargo.toml cargo-0.19.0/vendor/gcc-0.3.45/Cargo.toml --- cargo-0.17.0/vendor/gcc-0.3.45/Cargo.toml 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/gcc-0.3.45/Cargo.toml 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,27 @@ +[package] + +name = "gcc" +version = "0.3.45" +authors = ["Alex Crichton "] +license = "MIT/Apache-2.0" +repository = "https://github.com/alexcrichton/gcc-rs" +documentation = "https://docs.rs/gcc" +description = """ +A build-time dependency for Cargo build scripts to assist in invoking the native +C compiler to compile native C code into a static 
archive to be linked into Rust +code. +""" +keywords = ["build-dependencies"] + +[badges] +travis-ci = { repository = "alexcrichton/gcc-rs" } +appveyor = { repository = "alexcrichton/gcc-rs" } + +[dependencies] +rayon = { version = "0.6", optional = true } + +[features] +parallel = ["rayon"] + +[dev-dependencies] +tempdir = "0.3" diff -Nru cargo-0.17.0/vendor/gcc-0.3.45/.gitignore cargo-0.19.0/vendor/gcc-0.3.45/.gitignore --- cargo-0.17.0/vendor/gcc-0.3.45/.gitignore 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/gcc-0.3.45/.gitignore 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,4 @@ +target +Cargo.lock +.idea +*.iml diff -Nru cargo-0.17.0/vendor/gcc-0.3.45/LICENSE-APACHE cargo-0.19.0/vendor/gcc-0.3.45/LICENSE-APACHE --- cargo-0.17.0/vendor/gcc-0.3.45/LICENSE-APACHE 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/gcc-0.3.45/LICENSE-APACHE 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + +4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + +8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ +Copyright [yyyy] [name of copyright owner] + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff -Nru cargo-0.17.0/vendor/gcc-0.3.45/LICENSE-MIT cargo-0.19.0/vendor/gcc-0.3.45/LICENSE-MIT --- cargo-0.17.0/vendor/gcc-0.3.45/LICENSE-MIT 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/gcc-0.3.45/LICENSE-MIT 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,25 @@ +Copyright (c) 2014 Alex Crichton + +Permission is hereby granted, free of charge, to any +person obtaining a copy of this software and associated +documentation files (the "Software"), to deal in the +Software without restriction, including without +limitation the rights to use, copy, modify, merge, +publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software +is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice +shall be included in all copies or substantial portions +of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF +ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED +TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A +PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR +IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. 
diff -Nru cargo-0.17.0/vendor/gcc-0.3.45/README.md cargo-0.19.0/vendor/gcc-0.3.45/README.md --- cargo-0.17.0/vendor/gcc-0.3.45/README.md 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/gcc-0.3.45/README.md 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,163 @@ +# gcc-rs + +A library to compile C/C++ code into a Rust library/application. + +[![Build Status](https://travis-ci.org/alexcrichton/gcc-rs.svg?branch=master)](https://travis-ci.org/alexcrichton/gcc-rs) +[![Build status](https://ci.appveyor.com/api/projects/status/onu270iw98h81nwv?svg=true)](https://ci.appveyor.com/project/alexcrichton/gcc-rs) + +[Documentation](https://docs.rs/gcc) + +A simple library meant to be used as a build dependency with Cargo packages in +order to build a set of C/C++ files into a static archive. Note that while this +crate is called "gcc", it actually calls out to the most relevant compile for +a platform, for example using `cl` on MSVC. That is, this crate does indeed work +on MSVC! + +## Using gcc-rs + +First, you'll want to both add a build script for your crate (`build.rs`) and +also add this crate to your `Cargo.toml` via: + +```toml +[package] +# ... +build = "build.rs" + +[build-dependencies] +gcc = "0.3" +``` + +Next up, you'll want to write a build script like so: + +```rust,no_run +// build.rs + +extern crate gcc; + +fn main() { + gcc::compile_library("libfoo.a", &["foo.c", "bar.c"]); +} +``` + +And that's it! Running `cargo build` should take care of the rest and your Rust +application will now have the C files `foo.c` and `bar.c` compiled into it. You +can call the functions in Rust by declaring functions in your Rust code like so: + +``` +extern { + fn foo_function(); + fn bar_function(); +} + +pub fn call() { + unsafe { + foo_function(); + bar_function(); + } +} + +fn main() { + // ... 
+} +``` + +## External configuration via environment variables + +To control the programs and flags used for building, the builder can set a +number of different environment variables. + +* `CFLAGS` - a series of space separated flags passed to "gcc". Note that + individual flags cannot currently contain spaces, so doing + something like: "-L=foo\ bar" is not possible. +* `CC` - the actual C compiler used. Note that this is used as an exact + executable name, so (for example) no extra flags can be passed inside + this variable, and the builder must ensure that there aren't any + trailing spaces. This compiler must understand the `-c` flag. For + certain `TARGET`s, it also is assumed to know about other flags (most + common is `-fPIC`). +* `AR` - the `ar` (archiver) executable to use to build the static library. + +Each of these variables can also be supplied with certain prefixes and suffixes, +in the following prioritized order: + +1. `_` - for example, `CC_x86_64-unknown-linux-gnu` +2. `_` - for example, `CC_x86_64_unknown_linux_gnu` +3. `_` - for example, `HOST_CC` or `TARGET_CFLAGS` +4. `` - a plain `CC`, `AR` as above. + +If none of these variables exist, gcc-rs uses built-in defaults + +In addition to the the above optional environment variables, `gcc-rs` has some +functions with hard requirements on some variables supplied by [cargo's +build-script driver][cargo] that it has the `TARGET`, `OUT_DIR`, `OPT_LEVEL`, +and `HOST` variables. + +[cargo]: http://doc.crates.io/build-script.html#inputs-to-the-build-script + +## Optional features + +Currently gcc-rs supports parallel compilation (think `make -jN`) but this +feature is turned off by default. To enable gcc-rs to compile C/C++ in parallel, +you can change your dependency to: + +```toml +[build-dependencies] +gcc = { version = "0.3", features = ["parallel"] } +``` + +By default gcc-rs will limit parallelism to `$NUM_JOBS`, or if not present it +will limit it to the number of cpus on the machine. 
If you are using cargo, +use `-jN` option of `build`, `test` and `run` commands as `$NUM_JOBS` +is supplied by cargo. + +## Compile-time Requirements + +To work properly this crate needs access to a C compiler when the build script +is being run. This crate does not ship a C compiler with it. The compiler +required varies per platform, but there are three broad categories: + +* Unix platforms require `cc` to be the C compiler. This can be found by + installing gcc/clang on Linux distributions and Xcode on OSX, for example. +* Windows platforms targeting MSVC (e.g. your target triple ends in `-msvc`) + require `cl.exe` to be available and in `PATH`. This is typically found in + standard Visual Studio installations and the `PATH` can be set up by running + the appropriate developer tools shell. +* Windows platforms targeting MinGW (e.g. your target triple ends in `-gnu`) + require `gcc` to be available in `PATH`. We recommend the + [MinGW-w64](http://mingw-w64.org) distribution, which is using the + [Win-builds](http://win-builds.org) installation system. + You may also acquire it via + [MSYS2](http://msys2.github.io), as explained [here][msys2-help]. Make sure + to install the appropriate architecture corresponding to your installation of + rustc. GCC from older [MinGW](http://www.mingw.org) project is compatible + only with 32-bit rust compiler. + +[msys2-help]: http://github.com/rust-lang/rust#building-on-windows + +## C++ support + +`gcc-rs` supports C++ libraries compilation by using the `cpp` method on +`Config`: + +```rust,no_run +extern crate gcc; + +fn main() { + gcc::Config::new() + .cpp(true) // Switch to C++ library compilation. + .file("foo.cpp") + .compile("libfoo.a"); +} +``` + +When using C++ library compilation switch, the `CXX` and `CXXFLAGS` env +variables are used instead of `CC` and `CFLAGS` and the C++ standard library is +linked to the crate target. 
+ +## License + +`gcc-rs` is primarily distributed under the terms of both the MIT license and +the Apache License (Version 2.0), with portions covered by various BSD-like +licenses. + +See LICENSE-APACHE, and LICENSE-MIT for details. diff -Nru cargo-0.17.0/vendor/gcc-0.3.45/src/bin/gcc-shim.rs cargo-0.19.0/vendor/gcc-0.3.45/src/bin/gcc-shim.rs --- cargo-0.17.0/vendor/gcc-0.3.45/src/bin/gcc-shim.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/gcc-0.3.45/src/bin/gcc-shim.rs 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,23 @@ +#![cfg_attr(test, allow(dead_code))] + +use std::env; +use std::fs::File; +use std::io::prelude::*; +use std::path::PathBuf; + +fn main() { + let out_dir = PathBuf::from(env::var_os("GCCTEST_OUT_DIR").unwrap()); + for i in 0.. { + let candidate = out_dir.join(format!("out{}", i)); + if candidate.exists() { + continue; + } + let mut f = File::create(candidate).unwrap(); + for arg in env::args().skip(1) { + writeln!(f, "{}", arg).unwrap(); + } + + File::create(out_dir.join("libfoo.a")).unwrap(); + break; + } +} diff -Nru cargo-0.17.0/vendor/gcc-0.3.45/src/lib.rs cargo-0.19.0/vendor/gcc-0.3.45/src/lib.rs --- cargo-0.17.0/vendor/gcc-0.3.45/src/lib.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/gcc-0.3.45/src/lib.rs 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,1153 @@ +//! A library for build scripts to compile custom C code +//! +//! This library is intended to be used as a `build-dependencies` entry in +//! `Cargo.toml`: +//! +//! ```toml +//! [build-dependencies] +//! gcc = "0.3" +//! ``` +//! +//! The purpose of this crate is to provide the utility functions necessary to +//! compile C code into a static archive which is then linked into a Rust crate. +//! The top-level `compile_library` function serves as a convenience and more +//! advanced configuration is available through the `Config` builder. +//! +//! This crate will automatically detect situations such as cross compilation or +//! 
other environment variables set by Cargo and will build code appropriately. +//! +//! # Examples +//! +//! Use the default configuration: +//! +//! ```no_run +//! extern crate gcc; +//! +//! fn main() { +//! gcc::compile_library("libfoo.a", &["src/foo.c"]); +//! } +//! ``` +//! +//! Use more advanced configuration: +//! +//! ```no_run +//! extern crate gcc; +//! +//! fn main() { +//! gcc::Config::new() +//! .file("src/foo.c") +//! .define("FOO", Some("bar")) +//! .include("src") +//! .compile("libfoo.a"); +//! } +//! ``` + +#![doc(html_root_url = "https://docs.rs/gcc/0.3")] +#![cfg_attr(test, deny(warnings))] +#![deny(missing_docs)] + +#[cfg(feature = "parallel")] +extern crate rayon; + +use std::env; +use std::ffi::{OsString, OsStr}; +use std::fs; +use std::path::{PathBuf, Path}; +use std::process::{Command, Stdio, Child}; +use std::io::{self, BufReader, BufRead, Read, Write}; +use std::thread::{self, JoinHandle}; + +#[cfg(windows)] +mod registry; +pub mod windows_registry; + +/// Extra configuration to pass to gcc. +pub struct Config { + include_directories: Vec, + definitions: Vec<(String, Option)>, + objects: Vec, + flags: Vec, + files: Vec, + cpp: bool, + cpp_link_stdlib: Option>, + cpp_set_stdlib: Option, + target: Option, + host: Option, + out_dir: Option, + opt_level: Option, + debug: Option, + env: Vec<(OsString, OsString)>, + compiler: Option, + archiver: Option, + cargo_metadata: bool, + pic: Option, +} + +/// Configuration used to represent an invocation of a C compiler. +/// +/// This can be used to figure out what compiler is in use, what the arguments +/// to it are, and what the environment variables look like for the compiler. +/// This can be used to further configure other build systems (e.g. forward +/// along CC and/or CFLAGS) or the `to_command` method can be used to run the +/// compiler itself. 
+pub struct Tool { + path: PathBuf, + args: Vec, + env: Vec<(OsString, OsString)>, + family: ToolFamily +} + +/// Represents the family of tools this tool belongs to. +/// +/// Each family of tools differs in how and what arguments they accept. +/// +/// Detection of a family is done on best-effort basis and may not accurately reflect the tool. +#[derive(Copy, Clone, Debug)] +enum ToolFamily { + /// Tool is GNU Compiler Collection-like. + Gnu, + /// Tool is Clang-like. It differs from the GCC in a sense that it accepts superset of flags + /// and its cross-compilation approach is different. + Clang, + /// Tool is the MSVC cl.exe. + Msvc, +} + +impl ToolFamily { + /// What the flag to request debug info for this family of tools look like + fn debug_flag(&self) -> &'static str { + match *self { + ToolFamily::Msvc => "/Z7", + ToolFamily::Gnu | + ToolFamily::Clang => "-g", + } + } + + /// What the flag to include directories into header search path looks like + fn include_flag(&self) -> &'static str { + match *self { + ToolFamily::Msvc => "/I", + ToolFamily::Gnu | + ToolFamily::Clang => "-I", + } + } + + /// What the flag to request macro-expanded source output looks like + fn expand_flag(&self) -> &'static str { + match *self { + ToolFamily::Msvc => "/E", + ToolFamily::Gnu | + ToolFamily::Clang => "-E", + } + } +} + +/// Compile a library from the given set of input C files. +/// +/// This will simply compile all files into object files and then assemble them +/// into the output. This will read the standard environment variables to detect +/// cross compilations and such. +/// +/// This function will also print all metadata on standard output for Cargo. 
+/// +/// # Example +/// +/// ```no_run +/// gcc::compile_library("libfoo.a", &["foo.c", "bar.c"]); +/// ``` +pub fn compile_library(output: &str, files: &[&str]) { + let mut c = Config::new(); + for f in files.iter() { + c.file(*f); + } + c.compile(output); +} + +impl Config { + /// Construct a new instance of a blank set of configuration. + /// + /// This builder is finished with the `compile` function. + pub fn new() -> Config { + Config { + include_directories: Vec::new(), + definitions: Vec::new(), + objects: Vec::new(), + flags: Vec::new(), + files: Vec::new(), + cpp: false, + cpp_link_stdlib: None, + cpp_set_stdlib: None, + target: None, + host: None, + out_dir: None, + opt_level: None, + debug: None, + env: Vec::new(), + compiler: None, + archiver: None, + cargo_metadata: true, + pic: None, + } + } + + /// Add a directory to the `-I` or include path for headers + pub fn include>(&mut self, dir: P) -> &mut Config { + self.include_directories.push(dir.as_ref().to_path_buf()); + self + } + + /// Specify a `-D` variable with an optional value. + pub fn define(&mut self, var: &str, val: Option<&str>) -> &mut Config { + self.definitions.push((var.to_string(), val.map(|s| s.to_string()))); + self + } + + /// Add an arbitrary object file to link in + pub fn object>(&mut self, obj: P) -> &mut Config { + self.objects.push(obj.as_ref().to_path_buf()); + self + } + + /// Add an arbitrary flag to the invocation of the compiler + pub fn flag(&mut self, flag: &str) -> &mut Config { + self.flags.push(flag.to_string()); + self + } + + /// Add a file which will be compiled + pub fn file>(&mut self, p: P) -> &mut Config { + self.files.push(p.as_ref().to_path_buf()); + self + } + + /// Set C++ support. + /// + /// The other `cpp_*` options will only become active if this is set to + /// `true`. + pub fn cpp(&mut self, cpp: bool) -> &mut Config { + self.cpp = cpp; + self + } + + /// Set the standard library to link against when compiling with C++ + /// support. 
+ /// + /// The default value of this property depends on the current target: On + /// OS X `Some("c++")` is used, when compiling for a Visual Studio based + /// target `None` is used and for other targets `Some("stdc++")` is used. + /// + /// A value of `None` indicates that no automatic linking should happen, + /// otherwise cargo will link against the specified library. + /// + /// The given library name must not contain the `lib` prefix. + pub fn cpp_link_stdlib(&mut self, cpp_link_stdlib: Option<&str>) -> &mut Config { + self.cpp_link_stdlib = Some(cpp_link_stdlib.map(|s| s.into())); + self + } + + /// Force the C++ compiler to use the specified standard library. + /// + /// Setting this option will automatically set `cpp_link_stdlib` to the same + /// value. + /// + /// The default value of this option is always `None`. + /// + /// This option has no effect when compiling for a Visual Studio based + /// target. + /// + /// This option sets the `-stdlib` flag, which is only supported by some + /// compilers (clang, icc) but not by others (gcc). The library will not + /// detect which compiler is used, as such it is the responsibility of the + /// caller to ensure that this option is only used in conjuction with a + /// compiler which supports the `-stdlib` flag. + /// + /// A value of `None` indicates that no specific C++ standard library should + /// be used, otherwise `-stdlib` is added to the compile invocation. + /// + /// The given library name must not contain the `lib` prefix. + pub fn cpp_set_stdlib(&mut self, cpp_set_stdlib: Option<&str>) -> &mut Config { + self.cpp_set_stdlib = cpp_set_stdlib.map(|s| s.into()); + self.cpp_link_stdlib(cpp_set_stdlib); + self + } + + /// Configures the target this configuration will be compiling for. + /// + /// This option is automatically scraped from the `TARGET` environment + /// variable by build scripts, so it's not required to call this function. 
+ pub fn target(&mut self, target: &str) -> &mut Config { + self.target = Some(target.to_string()); + self + } + + /// Configures the host assumed by this configuration. + /// + /// This option is automatically scraped from the `HOST` environment + /// variable by build scripts, so it's not required to call this function. + pub fn host(&mut self, host: &str) -> &mut Config { + self.host = Some(host.to_string()); + self + } + + /// Configures the optimization level of the generated object files. + /// + /// This option is automatically scraped from the `OPT_LEVEL` environment + /// variable by build scripts, so it's not required to call this function. + pub fn opt_level(&mut self, opt_level: u32) -> &mut Config { + self.opt_level = Some(opt_level.to_string()); + self + } + + /// Configures the optimization level of the generated object files. + /// + /// This option is automatically scraped from the `OPT_LEVEL` environment + /// variable by build scripts, so it's not required to call this function. + pub fn opt_level_str(&mut self, opt_level: &str) -> &mut Config { + self.opt_level = Some(opt_level.to_string()); + self + } + + /// Configures whether the compiler will emit debug information when + /// generating object files. + /// + /// This option is automatically scraped from the `PROFILE` environment + /// variable by build scripts (only enabled when the profile is "debug"), so + /// it's not required to call this function. + pub fn debug(&mut self, debug: bool) -> &mut Config { + self.debug = Some(debug); + self + } + + /// Configures the output directory where all object files and static + /// libraries will be located. + /// + /// This option is automatically scraped from the `OUT_DIR` environment + /// variable by build scripts, so it's not required to call this function. + pub fn out_dir>(&mut self, out_dir: P) -> &mut Config { + self.out_dir = Some(out_dir.as_ref().to_owned()); + self + } + + /// Configures the compiler to be used to produce output. 
+ /// + /// This option is automatically determined from the target platform or a + /// number of environment variables, so it's not required to call this + /// function. + pub fn compiler>(&mut self, compiler: P) -> &mut Config { + self.compiler = Some(compiler.as_ref().to_owned()); + self + } + + /// Configures the tool used to assemble archives. + /// + /// This option is automatically determined from the target platform or a + /// number of environment variables, so it's not required to call this + /// function. + pub fn archiver>(&mut self, archiver: P) -> &mut Config { + self.archiver = Some(archiver.as_ref().to_owned()); + self + } + /// Define whether metadata should be emitted for cargo allowing it to + /// automatically link the binary. Defaults to `true`. + pub fn cargo_metadata(&mut self, cargo_metadata: bool) -> &mut Config { + self.cargo_metadata = cargo_metadata; + self + } + + /// Configures whether the compiler will emit position independent code. + /// + /// This option defaults to `false` for `windows-gnu` targets and + /// to `true` for all other targets. 
+ pub fn pic(&mut self, pic: bool) -> &mut Config { + self.pic = Some(pic); + self + } + + + #[doc(hidden)] + pub fn __set_env(&mut self, a: A, b: B) -> &mut Config + where A: AsRef, + B: AsRef + { + self.env.push((a.as_ref().to_owned(), b.as_ref().to_owned())); + self + } + + /// Run the compiler, generating the file `output` + /// + /// The name `output` must begin with `lib` and end with `.a` + pub fn compile(&self, output: &str) { + assert!(output.starts_with("lib")); + assert!(output.ends_with(".a")); + let lib_name = &output[3..output.len() - 2]; + let dst = self.get_out_dir(); + + let mut objects = Vec::new(); + let mut src_dst = Vec::new(); + for file in self.files.iter() { + let obj = dst.join(file).with_extension("o"); + let obj = if !obj.starts_with(&dst) { + dst.join(obj.file_name().unwrap()) + } else { + obj + }; + fs::create_dir_all(&obj.parent().unwrap()).unwrap(); + src_dst.push((file.to_path_buf(), obj.clone())); + objects.push(obj); + } + self.compile_objects(&src_dst); + self.assemble(lib_name, &dst.join(output), &objects); + + if self.get_target().contains("msvc") { + let compiler = self.get_base_compiler(); + let atlmfc_lib = compiler.env() + .iter() + .find(|&&(ref var, _)| var.as_os_str() == OsStr::new("LIB")) + .and_then(|&(_, ref lib_paths)| { + env::split_paths(lib_paths).find(|path| { + let sub = Path::new("atlmfc/lib"); + path.ends_with(sub) || path.parent().map_or(false, |p| p.ends_with(sub)) + }) + }); + + if let Some(atlmfc_lib) = atlmfc_lib { + self.print(&format!("cargo:rustc-link-search=native={}", atlmfc_lib.display())); + } + } + + self.print(&format!("cargo:rustc-link-lib=static={}", + &output[3..output.len() - 2])); + self.print(&format!("cargo:rustc-link-search=native={}", dst.display())); + + // Add specific C++ libraries, if enabled. 
+ if self.cpp { + if let Some(stdlib) = self.get_cpp_link_stdlib() { + self.print(&format!("cargo:rustc-link-lib={}", stdlib)); + } + } + } + + #[cfg(feature = "parallel")] + fn compile_objects(&self, objs: &[(PathBuf, PathBuf)]) { + use self::rayon::prelude::*; + + let mut cfg = rayon::Configuration::new(); + if let Ok(amt) = env::var("NUM_JOBS") { + if let Ok(amt) = amt.parse() { + cfg = cfg.set_num_threads(amt); + } + } + drop(rayon::initialize(cfg)); + + objs.par_iter().weight_max().for_each(|&(ref src, ref dst)| self.compile_object(src, dst)); + } + + #[cfg(not(feature = "parallel"))] + fn compile_objects(&self, objs: &[(PathBuf, PathBuf)]) { + for &(ref src, ref dst) in objs { + self.compile_object(src, dst); + } + } + + fn compile_object(&self, file: &Path, dst: &Path) { + let is_asm = file.extension().and_then(|s| s.to_str()) == Some("asm"); + let msvc = self.get_target().contains("msvc"); + let (mut cmd, name) = if msvc && is_asm { + self.msvc_macro_assembler() + } else { + let compiler = self.get_compiler(); + let mut cmd = compiler.to_command(); + for &(ref a, ref b) in self.env.iter() { + cmd.env(a, b); + } + (cmd, + compiler.path + .file_name() + .unwrap() + .to_string_lossy() + .into_owned()) + }; + if msvc && is_asm { + cmd.arg("/Fo").arg(dst); + } else if msvc { + let mut s = OsString::from("/Fo"); + s.push(&dst); + cmd.arg(s); + } else { + cmd.arg("-o").arg(&dst); + } + cmd.arg(if msvc { "/c" } else { "-c" }); + cmd.arg(file); + + run(&mut cmd, &name); + } + + /// Run the compiler, returning the macro-expanded version of the input files. + /// + /// This is only relevant for C and C++ files. 
+ pub fn expand(&self) -> Vec { + let compiler = self.get_compiler(); + let mut cmd = compiler.to_command(); + for &(ref a, ref b) in self.env.iter() { + cmd.env(a, b); + } + cmd.arg(compiler.family.expand_flag()); + for file in self.files.iter() { + cmd.arg(file); + } + + let name = compiler.path + .file_name() + .unwrap() + .to_string_lossy() + .into_owned(); + + run_output(&mut cmd, &name) + } + + /// Get the compiler that's in use for this configuration. + /// + /// This function will return a `Tool` which represents the culmination + /// of this configuration at a snapshot in time. The returned compiler can + /// be inspected (e.g. the path, arguments, environment) to forward along to + /// other tools, or the `to_command` method can be used to invoke the + /// compiler itself. + /// + /// This method will take into account all configuration such as debug + /// information, optimization level, include directories, defines, etc. + /// Additionally, the compiler binary in use follows the standard + /// conventions for this path, e.g. looking at the explicitly set compiler, + /// environment variables (a number of which are inspected here), and then + /// falling back to the default configuration. + pub fn get_compiler(&self) -> Tool { + let opt_level = self.get_opt_level(); + let target = self.get_target(); + + let mut cmd = self.get_base_compiler(); + let nvcc = cmd.path.file_name() + .and_then(|p| p.to_str()).map(|p| p.contains("nvcc")) + .unwrap_or(false); + + // Non-target flags + // If the flag is not conditioned on target variable, it belongs here :) + match cmd.family { + ToolFamily::Msvc => { + cmd.args.push("/nologo".into()); + let features = env::var("CARGO_CFG_TARGET_FEATURE") + .unwrap_or(String::new()); + if features.contains("crt-static") { + cmd.args.push("/MT".into()); + } else { + cmd.args.push("/MD".into()); + } + match &opt_level[..] 
{ + "z" | "s" => cmd.args.push("/Os".into()), + "1" => cmd.args.push("/O1".into()), + // -O3 is a valid value for gcc and clang compilers, but not msvc. Cap to /O2. + "2" | "3" => cmd.args.push("/O2".into()), + _ => {} + } + } + ToolFamily::Gnu | + ToolFamily::Clang => { + cmd.args.push(format!("-O{}", opt_level).into()); + if !nvcc { + cmd.args.push("-ffunction-sections".into()); + cmd.args.push("-fdata-sections".into()); + if self.pic.unwrap_or(!target.contains("windows-gnu")) { + cmd.args.push("-fPIC".into()); + } + } else if self.pic.unwrap_or(false) { + cmd.args.push("-Xcompiler".into()); + cmd.args.push("\'-fPIC\'".into()); + } + } + } + for arg in self.envflags(if self.cpp {"CXXFLAGS"} else {"CFLAGS"}) { + cmd.args.push(arg.into()); + } + + if self.get_debug() { + cmd.args.push(cmd.family.debug_flag().into()); + } + + // Target flags + match cmd.family { + ToolFamily::Clang => { + cmd.args.push(format!("--target={}", target).into()); + } + ToolFamily::Msvc => { + if target.contains("i586") { + cmd.args.push("/ARCH:IA32".into()); + } + } + ToolFamily::Gnu => { + if target.contains("i686") || target.contains("i586") { + cmd.args.push("-m32".into()); + } else if target.contains("x86_64") || target.contains("powerpc64") { + cmd.args.push("-m64".into()); + } + + if target.contains("musl") { + cmd.args.push("-static".into()); + } + + // armv7 targets get to use armv7 instructions + if target.starts_with("armv7-unknown-linux-") { + cmd.args.push("-march=armv7-a".into()); + } + + // On android we can guarantee some extra float instructions + // (specified in the android spec online) + if target.starts_with("armv7-linux-androideabi") { + cmd.args.push("-march=armv7-a".into()); + cmd.args.push("-mfpu=vfpv3-d16".into()); + } + + // For us arm == armv6 by default + if target.starts_with("arm-unknown-linux-") { + cmd.args.push("-march=armv6".into()); + cmd.args.push("-marm".into()); + } + + // Turn codegen down on i586 to avoid some instructions. 
+ if target.starts_with("i586-unknown-linux-") { + cmd.args.push("-march=pentium".into()); + } + + // Set codegen level for i686 correctly + if target.starts_with("i686-unknown-linux-") { + cmd.args.push("-march=i686".into()); + } + + // Looks like `musl-gcc` makes is hard for `-m32` to make its way + // all the way to the linker, so we need to actually instruct the + // linker that we're generating 32-bit executables as well. This'll + // typically only be used for build scripts which transitively use + // these flags that try to compile executables. + if target == "i686-unknown-linux-musl" { + cmd.args.push("-Wl,-melf_i386".into()); + } + + if target.starts_with("thumb") { + cmd.args.push("-mthumb".into()); + + if target.ends_with("eabihf") { + cmd.args.push("-mfloat-abi=hard".into()) + } + } + if target.starts_with("thumbv6m") { + cmd.args.push("-march=armv6s-m".into()); + } + if target.starts_with("thumbv7em") { + cmd.args.push("-march=armv7e-m".into()); + + if target.ends_with("eabihf") { + cmd.args.push("-mfpu=fpv4-sp-d16".into()) + } + } + if target.starts_with("thumbv7m") { + cmd.args.push("-march=armv7-m".into()); + } + } + } + + if target.contains("-ios") { + // FIXME: potential bug. iOS is always compiled with Clang, but Gcc compiler may be + // detected instead. 
+ self.ios_flags(&mut cmd); + } + + if self.cpp { + match (self.cpp_set_stdlib.as_ref(), cmd.family) { + (None, _) => { } + (Some(stdlib), ToolFamily::Gnu) | + (Some(stdlib), ToolFamily::Clang) => { + cmd.args.push(format!("-stdlib=lib{}", stdlib).into()); + } + _ => { + println!("cargo:warning=cpp_set_stdlib is specified, but the {:?} compiler \ + does not support this option, ignored", cmd.family); + } + } + } + + for directory in self.include_directories.iter() { + cmd.args.push(cmd.family.include_flag().into()); + cmd.args.push(directory.into()); + } + + for flag in self.flags.iter() { + cmd.args.push(flag.into()); + } + + for &(ref key, ref value) in self.definitions.iter() { + let lead = if let ToolFamily::Msvc = cmd.family {"/"} else {"-"}; + if let &Some(ref value) = value { + cmd.args.push(format!("{}D{}={}", lead, key, value).into()); + } else { + cmd.args.push(format!("{}D{}", lead, key).into()); + } + } + cmd + } + + fn msvc_macro_assembler(&self) -> (Command, String) { + let target = self.get_target(); + let tool = if target.contains("x86_64") { + "ml64.exe" + } else { + "ml.exe" + }; + let mut cmd = windows_registry::find(&target, tool).unwrap_or_else(|| self.cmd(tool)); + for directory in self.include_directories.iter() { + cmd.arg("/I").arg(directory); + } + for &(ref key, ref value) in self.definitions.iter() { + if let &Some(ref value) = value { + cmd.arg(&format!("/D{}={}", key, value)); + } else { + cmd.arg(&format!("/D{}", key)); + } + } + + if target.contains("i686") || target.contains("i586") { + cmd.arg("/safeseh"); + } + for flag in self.flags.iter() { + cmd.arg(flag); + } + + (cmd, tool.to_string()) + } + + fn assemble(&self, lib_name: &str, dst: &Path, objects: &[PathBuf]) { + // Delete the destination if it exists as the `ar` tool at least on Unix + // appends to it, which we don't want. 
+ let _ = fs::remove_file(&dst); + + let target = self.get_target(); + if target.contains("msvc") { + let mut cmd = match self.archiver { + Some(ref s) => self.cmd(s), + None => windows_registry::find(&target, "lib.exe").unwrap_or(self.cmd("lib.exe")), + }; + let mut out = OsString::from("/OUT:"); + out.push(dst); + run(cmd.arg(out) + .arg("/nologo") + .args(objects) + .args(&self.objects), + "lib.exe"); + + // The Rust compiler will look for libfoo.a and foo.lib, but the + // MSVC linker will also be passed foo.lib, so be sure that both + // exist for now. + let lib_dst = dst.with_file_name(format!("{}.lib", lib_name)); + let _ = fs::remove_file(&lib_dst); + fs::hard_link(&dst, &lib_dst) + .or_else(|_| { + // if hard-link fails, just copy (ignoring the number of bytes written) + fs::copy(&dst, &lib_dst).map(|_| ()) + }) + .ok() + .expect("Copying from {:?} to {:?} failed.");; + } else { + let ar = self.get_ar(); + let cmd = ar.file_name().unwrap().to_string_lossy(); + run(self.cmd(&ar) + .arg("crs") + .arg(dst) + .args(objects) + .args(&self.objects), + &cmd); + } + } + + fn ios_flags(&self, cmd: &mut Tool) { + enum ArchSpec { + Device(&'static str), + Simulator(&'static str), + } + + let target = self.get_target(); + let arch = target.split('-').nth(0).unwrap(); + let arch = match arch { + "arm" | "armv7" | "thumbv7" => ArchSpec::Device("armv7"), + "armv7s" | "thumbv7s" => ArchSpec::Device("armv7s"), + "arm64" | "aarch64" => ArchSpec::Device("arm64"), + "i386" | "i686" => ArchSpec::Simulator("-m32"), + "x86_64" => ArchSpec::Simulator("-m64"), + _ => fail("Unknown arch for iOS target"), + }; + + let sdk = match arch { + ArchSpec::Device(arch) => { + cmd.args.push("-arch".into()); + cmd.args.push(arch.into()); + cmd.args.push("-miphoneos-version-min=7.0".into()); + "iphoneos" + } + ArchSpec::Simulator(arch) => { + cmd.args.push(arch.into()); + cmd.args.push("-mios-simulator-version-min=7.0".into()); + "iphonesimulator" + } + }; + + self.print(&format!("Detecting 
iOS SDK path for {}", sdk)); + let sdk_path = self.cmd("xcrun") + .arg("--show-sdk-path") + .arg("--sdk") + .arg(sdk) + .stderr(Stdio::inherit()) + .output() + .unwrap() + .stdout; + + let sdk_path = String::from_utf8(sdk_path).unwrap(); + + cmd.args.push("-isysroot".into()); + cmd.args.push(sdk_path.trim().into()); + } + + fn cmd>(&self, prog: P) -> Command { + let mut cmd = Command::new(prog); + for &(ref a, ref b) in self.env.iter() { + cmd.env(a, b); + } + return cmd; + } + + fn get_base_compiler(&self) -> Tool { + if let Some(ref c) = self.compiler { + return Tool::new(c.clone()); + } + let host = self.get_host(); + let target = self.get_target(); + let (env, msvc, gnu) = if self.cpp { + ("CXX", "cl.exe", "g++") + } else { + ("CC", "cl.exe", "gcc") + }; + + let default = if host.contains("solaris") { + // In this case, c++/cc unlikely to exist or be correct. + gnu + } else if self.cpp { + "c++" + } else { + "cc" + }; + + self.env_tool(env) + .map(|(tool, args)| { + let mut t = Tool::new(PathBuf::from(tool)); + for arg in args { + t.args.push(arg.into()); + } + return t; + }) + .or_else(|| { + if target.contains("emscripten") { + if self.cpp { + Some(Tool::new(PathBuf::from("em++"))) + } else { + Some(Tool::new(PathBuf::from("emcc"))) + } + } else { + None + } + }) + .or_else(|| windows_registry::find_tool(&target, "cl.exe")) + .unwrap_or_else(|| { + let compiler = if host.contains("windows") && target.contains("windows") { + if target.contains("msvc") { + msvc.to_string() + } else { + format!("{}.exe", gnu) + } + } else if target.contains("android") { + format!("{}-{}", target.replace("armv7", "arm"), gnu) + } else if self.get_host() != target { + // CROSS_COMPILE is of the form: "arm-linux-gnueabi-" + let cc_env = self.getenv("CROSS_COMPILE"); + let cross_compile = cc_env.as_ref().map(|s| s.trim_right_matches('-')); + let prefix = cross_compile.or(match &target[..] 
{ + "aarch64-unknown-linux-gnu" => Some("aarch64-linux-gnu"), + "arm-unknown-linux-gnueabi" => Some("arm-linux-gnueabi"), + "arm-unknown-linux-gnueabihf" => Some("arm-linux-gnueabihf"), + "arm-unknown-linux-musleabi" => Some("arm-linux-musleabi"), + "arm-unknown-linux-musleabihf" => Some("arm-linux-musleabihf"), + "arm-unknown-netbsdelf-eabi" => Some("arm--netbsdelf-eabi"), + "armv6-unknown-netbsdelf-eabihf" => Some("armv6--netbsdelf-eabihf"), + "armv7-unknown-linux-gnueabihf" => Some("arm-linux-gnueabihf"), + "armv7-unknown-linux-musleabihf" => Some("arm-linux-musleabihf"), + "armv7-unknown-netbsdelf-eabihf" => Some("armv7--netbsdelf-eabihf"), + "i686-pc-windows-gnu" => Some("i686-w64-mingw32"), + "i686-unknown-linux-musl" => Some("musl"), + "i686-unknown-netbsdelf" => Some("i486--netbsdelf"), + "mips-unknown-linux-gnu" => Some("mips-linux-gnu"), + "mipsel-unknown-linux-gnu" => Some("mipsel-linux-gnu"), + "mips64-unknown-linux-gnuabi64" => Some("mips64-linux-gnuabi64"), + "mips64el-unknown-linux-gnuabi64" => Some("mips64el-linux-gnuabi64"), + "powerpc-unknown-linux-gnu" => Some("powerpc-linux-gnu"), + "powerpc-unknown-netbsd" => Some("powerpc--netbsd"), + "powerpc64-unknown-linux-gnu" => Some("powerpc-linux-gnu"), + "powerpc64le-unknown-linux-gnu" => Some("powerpc64le-linux-gnu"), + "s390x-unknown-linux-gnu" => Some("s390x-linux-gnu"), + "sparc64-unknown-netbsd" => Some("sparc64--netbsd"), + "sparcv9-sun-solaris" => Some("sparcv9-sun-solaris"), + "thumbv6m-none-eabi" => Some("arm-none-eabi"), + "thumbv7em-none-eabi" => Some("arm-none-eabi"), + "thumbv7em-none-eabihf" => Some("arm-none-eabi"), + "thumbv7m-none-eabi" => Some("arm-none-eabi"), + "x86_64-pc-windows-gnu" => Some("x86_64-w64-mingw32"), + "x86_64-rumprun-netbsd" => Some("x86_64-rumprun-netbsd"), + "x86_64-unknown-linux-musl" => Some("musl"), + "x86_64-unknown-netbsd" => Some("x86_64--netbsd"), + _ => None, + }); + match prefix { + Some(prefix) => format!("{}-{}", prefix, gnu), + None => 
default.to_string(), + } + } else { + default.to_string() + }; + Tool::new(PathBuf::from(compiler)) + }) + } + + fn get_var(&self, var_base: &str) -> Result { + let target = self.get_target(); + let host = self.get_host(); + let kind = if host == target { "HOST" } else { "TARGET" }; + let target_u = target.replace("-", "_"); + let res = self.getenv(&format!("{}_{}", var_base, target)) + .or_else(|| self.getenv(&format!("{}_{}", var_base, target_u))) + .or_else(|| self.getenv(&format!("{}_{}", kind, var_base))) + .or_else(|| self.getenv(var_base)); + + match res { + Some(res) => Ok(res), + None => Err("could not get environment variable".to_string()), + } + } + + fn envflags(&self, name: &str) -> Vec { + self.get_var(name) + .unwrap_or(String::new()) + .split(|c: char| c.is_whitespace()) + .filter(|s| !s.is_empty()) + .map(|s| s.to_string()) + .collect() + } + + fn env_tool(&self, name: &str) -> Option<(String, Vec)> { + self.get_var(name).ok().map(|tool| { + let whitelist = ["ccache", "distcc"]; + for t in whitelist.iter() { + if tool.starts_with(t) && tool[t.len()..].starts_with(" ") { + return (t.to_string(), vec![tool[t.len()..].trim_left().to_string()]); + } + } + (tool, Vec::new()) + }) + } + + /// Returns the default C++ standard library for the current target: `libc++` + /// for OS X and `libstdc++` for anything else. 
+ fn get_cpp_link_stdlib(&self) -> Option { + self.cpp_link_stdlib.clone().unwrap_or_else(|| { + let target = self.get_target(); + if target.contains("msvc") { + None + } else if target.contains("darwin") { + Some("c++".to_string()) + } else if target.contains("freebsd") { + Some("c++".to_string()) + } else { + Some("stdc++".to_string()) + } + }) + } + + fn get_ar(&self) -> PathBuf { + self.archiver + .clone() + .or_else(|| self.get_var("AR").map(PathBuf::from).ok()) + .unwrap_or_else(|| { + if self.get_target().contains("android") { + PathBuf::from(format!("{}-ar", self.get_target().replace("armv7", "arm"))) + } else if self.get_target().contains("emscripten") { + PathBuf::from("emar") + } else { + PathBuf::from("ar") + } + }) + } + + fn get_target(&self) -> String { + self.target.clone().unwrap_or_else(|| self.getenv_unwrap("TARGET")) + } + + fn get_host(&self) -> String { + self.host.clone().unwrap_or_else(|| self.getenv_unwrap("HOST")) + } + + fn get_opt_level(&self) -> String { + self.opt_level.as_ref().cloned().unwrap_or_else(|| self.getenv_unwrap("OPT_LEVEL")) + } + + fn get_debug(&self) -> bool { + self.debug.unwrap_or_else(|| self.getenv_unwrap("PROFILE") == "debug") + } + + fn get_out_dir(&self) -> PathBuf { + self.out_dir.clone().unwrap_or_else(|| env::var_os("OUT_DIR").map(PathBuf::from).unwrap()) + } + + fn getenv(&self, v: &str) -> Option { + let r = env::var(v).ok(); + self.print(&format!("{} = {:?}", v, r)); + r + } + + fn getenv_unwrap(&self, v: &str) -> String { + match self.getenv(v) { + Some(s) => s, + None => fail(&format!("environment variable `{}` not defined", v)), + } + } + + fn print(&self, s: &str) { + if self.cargo_metadata { + println!("{}", s); + } + } +} + +impl Tool { + fn new(path: PathBuf) -> Tool { + // Try to detect family of the tool from its name, falling back to Gnu. 
+ let family = if let Some(fname) = path.file_name().and_then(|p| p.to_str()) { + if fname.contains("clang") { + ToolFamily::Clang + } else if fname.contains("cl") { + ToolFamily::Msvc + } else { + ToolFamily::Gnu + } + } else { + ToolFamily::Gnu + }; + Tool { + path: path, + args: Vec::new(), + env: Vec::new(), + family: family + } + } + + /// Converts this compiler into a `Command` that's ready to be run. + /// + /// This is useful for when the compiler needs to be executed and the + /// command returned will already have the initial arguments and environment + /// variables configured. + pub fn to_command(&self) -> Command { + let mut cmd = Command::new(&self.path); + cmd.args(&self.args); + for &(ref k, ref v) in self.env.iter() { + cmd.env(k, v); + } + cmd + } + + /// Returns the path for this compiler. + /// + /// Note that this may not be a path to a file on the filesystem, e.g. "cc", + /// but rather something which will be resolved when a process is spawned. + pub fn path(&self) -> &Path { + &self.path + } + + /// Returns the default set of arguments to the compiler needed to produce + /// executables for the target this compiler generates. + pub fn args(&self) -> &[OsString] { + &self.args + } + + /// Returns the set of environment variables needed for this compiler to + /// operate. + /// + /// This is typically only used for MSVC compilers currently. 
+ pub fn env(&self) -> &[(OsString, OsString)] { + &self.env + } +} + +fn run(cmd: &mut Command, program: &str) { + let (mut child, print) = spawn(cmd, program); + let status = child.wait().expect("failed to wait on child process"); + print.join().unwrap(); + println!("{:?}", status); + if !status.success() { + fail(&format!("command did not execute successfully, got: {}", status)); + } +} + +fn run_output(cmd: &mut Command, program: &str) -> Vec { + cmd.stdout(Stdio::piped()); + let (mut child, print) = spawn(cmd, program); + let mut stdout = vec![]; + child.stdout.take().unwrap().read_to_end(&mut stdout).unwrap(); + let status = child.wait().expect("failed to wait on child process"); + print.join().unwrap(); + println!("{:?}", status); + if !status.success() { + fail(&format!("command did not execute successfully, got: {}", status)); + } + return stdout +} + +fn spawn(cmd: &mut Command, program: &str) -> (Child, JoinHandle<()>) { + println!("running: {:?}", cmd); + + // Capture the standard error coming from these programs, and write it out + // with cargo:warning= prefixes. Note that this is a bit wonky to avoid + // requiring the output to be UTF-8, we instead just ship bytes from one + // location to another. + match cmd.stderr(Stdio::piped()).spawn() { + Ok(mut child) => { + let stderr = BufReader::new(child.stderr.take().unwrap()); + let print = thread::spawn(move || { + for line in stderr.split(b'\n').filter_map(|l| l.ok()) { + print!("cargo:warning="); + std::io::stdout().write_all(&line).unwrap(); + println!(""); + } + }); + (child, print) + } + Err(ref e) if e.kind() == io::ErrorKind::NotFound => { + let extra = if cfg!(windows) { + " (see https://github.com/alexcrichton/gcc-rs#compile-time-requirements \ + for help)" + } else { + "" + }; + fail(&format!("failed to execute command: {}\nIs `{}` \ + not installed?{}", + e, + program, + extra)); + } + Err(e) => fail(&format!("failed to execute command: {}", e)), + } +} + +fn fail(s: &str) -> ! 
{ + println!("\n\n{}\n\n", s); + panic!() +} diff -Nru cargo-0.17.0/vendor/gcc-0.3.45/src/registry.rs cargo-0.19.0/vendor/gcc-0.3.45/src/registry.rs --- cargo-0.17.0/vendor/gcc-0.3.45/src/registry.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/gcc-0.3.45/src/registry.rs 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,190 @@ +// Copyright 2015 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use std::ffi::{OsString, OsStr}; +use std::io; +use std::ops::RangeFrom; +use std::os::raw; +use std::os::windows::prelude::*; + +pub struct RegistryKey(Repr); + +type HKEY = *mut u8; +type DWORD = u32; +type LPDWORD = *mut DWORD; +type LPCWSTR = *const u16; +type LPWSTR = *mut u16; +type LONG = raw::c_long; +type PHKEY = *mut HKEY; +type PFILETIME = *mut u8; +type LPBYTE = *mut u8; +type REGSAM = u32; + +const ERROR_SUCCESS: DWORD = 0; +const ERROR_NO_MORE_ITEMS: DWORD = 259; +const HKEY_LOCAL_MACHINE: HKEY = 0x80000002 as HKEY; +const REG_SZ: DWORD = 1; +const KEY_READ: DWORD = 0x20019; +const KEY_WOW64_32KEY: DWORD = 0x200; + +#[link(name = "advapi32")] +extern "system" { + fn RegOpenKeyExW(key: HKEY, + lpSubKey: LPCWSTR, + ulOptions: DWORD, + samDesired: REGSAM, + phkResult: PHKEY) + -> LONG; + fn RegEnumKeyExW(key: HKEY, + dwIndex: DWORD, + lpName: LPWSTR, + lpcName: LPDWORD, + lpReserved: LPDWORD, + lpClass: LPWSTR, + lpcClass: LPDWORD, + lpftLastWriteTime: PFILETIME) + -> LONG; + fn RegQueryValueExW(hKey: HKEY, + lpValueName: LPCWSTR, + lpReserved: LPDWORD, + lpType: LPDWORD, + lpData: LPBYTE, + lpcbData: LPDWORD) + -> LONG; + fn RegCloseKey(hKey: HKEY) -> LONG; +} + +struct OwnedKey(HKEY); + +enum Repr { + Const(HKEY), + Owned(OwnedKey), +} + +pub struct Iter<'a> { + 
idx: RangeFrom, + key: &'a RegistryKey, +} + +unsafe impl Sync for Repr {} +unsafe impl Send for Repr {} + +pub static LOCAL_MACHINE: RegistryKey = RegistryKey(Repr::Const(HKEY_LOCAL_MACHINE)); + +impl RegistryKey { + fn raw(&self) -> HKEY { + match self.0 { + Repr::Const(val) => val, + Repr::Owned(ref val) => val.0, + } + } + + pub fn open(&self, key: &OsStr) -> io::Result { + let key = key.encode_wide().chain(Some(0)).collect::>(); + let mut ret = 0 as *mut _; + let err = unsafe { + RegOpenKeyExW(self.raw(), + key.as_ptr(), + 0, + KEY_READ | KEY_WOW64_32KEY, + &mut ret) + }; + if err == ERROR_SUCCESS as LONG { + Ok(RegistryKey(Repr::Owned(OwnedKey(ret)))) + } else { + Err(io::Error::from_raw_os_error(err as i32)) + } + } + + pub fn iter(&self) -> Iter { + Iter { + idx: 0.., + key: self, + } + } + + pub fn query_str(&self, name: &str) -> io::Result { + let name: &OsStr = name.as_ref(); + let name = name.encode_wide().chain(Some(0)).collect::>(); + let mut len = 0; + let mut kind = 0; + unsafe { + let err = RegQueryValueExW(self.raw(), + name.as_ptr(), + 0 as *mut _, + &mut kind, + 0 as *mut _, + &mut len); + if err != ERROR_SUCCESS as LONG { + return Err(io::Error::from_raw_os_error(err as i32)); + } + if kind != REG_SZ { + return Err(io::Error::new(io::ErrorKind::Other, "registry key wasn't a string")); + } + + // The length here is the length in bytes, but we're using wide + // characters so we need to be sure to halve it for the capacity + // passed in. + let mut v = Vec::with_capacity(len as usize / 2); + let err = RegQueryValueExW(self.raw(), + name.as_ptr(), + 0 as *mut _, + 0 as *mut _, + v.as_mut_ptr() as *mut _, + &mut len); + if err != ERROR_SUCCESS as LONG { + return Err(io::Error::from_raw_os_error(err as i32)); + } + v.set_len(len as usize / 2); + + // Some registry keys may have a terminating nul character, but + // we're not interested in that, so chop it off if it's there. 
+ if v[v.len() - 1] == 0 { + v.pop(); + } + Ok(OsString::from_wide(&v)) + } + } +} + +impl Drop for OwnedKey { + fn drop(&mut self) { + unsafe { + RegCloseKey(self.0); + } + } +} + +impl<'a> Iterator for Iter<'a> { + type Item = io::Result; + + fn next(&mut self) -> Option> { + self.idx.next().and_then(|i| unsafe { + let mut v = Vec::with_capacity(256); + let mut len = v.capacity() as DWORD; + let ret = RegEnumKeyExW(self.key.raw(), + i, + v.as_mut_ptr(), + &mut len, + 0 as *mut _, + 0 as *mut _, + 0 as *mut _, + 0 as *mut _); + if ret == ERROR_NO_MORE_ITEMS as LONG { + None + } else if ret != ERROR_SUCCESS as LONG { + Some(Err(io::Error::from_raw_os_error(ret as i32))) + } else { + v.set_len(len as usize); + Some(Ok(OsString::from_wide(&v))) + } + }) + } +} diff -Nru cargo-0.17.0/vendor/gcc-0.3.45/src/windows_registry.rs cargo-0.19.0/vendor/gcc-0.3.45/src/windows_registry.rs --- cargo-0.17.0/vendor/gcc-0.3.45/src/windows_registry.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/gcc-0.3.45/src/windows_registry.rs 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,424 @@ +// Copyright 2015 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! A helper module to probe the Windows Registry when looking for +//! windows-specific tools. + +use std::process::Command; + +use Tool; + +macro_rules! otry { + ($expr:expr) => (match $expr { + Some(val) => val, + None => return None, + }) +} + +/// Attempts to find a tool within an MSVC installation using the Windows +/// registry as a point to search from. +/// +/// The `target` argument is the target that the tool should work for (e.g. +/// compile or link for) and the `tool` argument is the tool to find (e.g. 
+/// `cl.exe` or `link.exe`). +/// +/// This function will return `None` if the tool could not be found, or it will +/// return `Some(cmd)` which represents a command that's ready to execute the +/// tool with the appropriate environment variables set. +/// +/// Note that this function always returns `None` for non-MSVC targets. +pub fn find(target: &str, tool: &str) -> Option { + find_tool(target, tool).map(|c| c.to_command()) +} + +/// Similar to the `find` function above, this function will attempt the same +/// operation (finding a MSVC tool in a local install) but instead returns a +/// `Tool` which may be introspected. +#[cfg(not(windows))] +pub fn find_tool(_target: &str, _tool: &str) -> Option { + None +} + +/// Documented above. +#[cfg(windows)] +pub fn find_tool(target: &str, tool: &str) -> Option { + use std::env; + use std::ffi::OsString; + use std::mem; + use std::path::{Path, PathBuf}; + use registry::{RegistryKey, LOCAL_MACHINE}; + + struct MsvcTool { + tool: PathBuf, + libs: Vec, + path: Vec, + include: Vec, + } + + impl MsvcTool { + fn new(tool: PathBuf) -> MsvcTool { + MsvcTool { + tool: tool, + libs: Vec::new(), + path: Vec::new(), + include: Vec::new(), + } + } + + fn into_tool(self) -> Tool { + let MsvcTool { tool, libs, path, include } = self; + let mut tool = Tool::new(tool.into()); + add_env(&mut tool, "LIB", libs); + add_env(&mut tool, "PATH", path); + add_env(&mut tool, "INCLUDE", include); + tool + } + } + + // This logic is all tailored for MSVC, if we're not that then bail out + // early. + if !target.contains("msvc") { + return None; + } + + // Looks like msbuild isn't located in the same location as other tools like + // cl.exe and lib.exe. To handle this we probe for it manually with + // dedicated registry keys. + if tool.contains("msbuild") { + return find_msbuild(target); + } + + // If VCINSTALLDIR is set, then someone's probably already run vcvars and we + // should just find whatever that indicates. 
+ if env::var_os("VCINSTALLDIR").is_some() { + return env::var_os("PATH") + .and_then(|path| env::split_paths(&path).map(|p| p.join(tool)).find(|p| p.exists())) + .map(|path| Tool::new(path.into())); + } + + // Ok, if we're here, now comes the fun part of the probing. Default shells + // or shells like MSYS aren't really configured to execute `cl.exe` and the + // various compiler tools shipped as part of Visual Studio. Here we try to + // first find the relevant tool, then we also have to be sure to fill in + // environment variables like `LIB`, `INCLUDE`, and `PATH` to ensure that + // the tool is actually usable. + + return find_msvc_latest(tool, target, "15.0") + .or_else(|| find_msvc_latest(tool, target, "14.0")) + .or_else(|| find_msvc_12(tool, target)) + .or_else(|| find_msvc_11(tool, target)); + + // For MSVC 14 or newer we need to find the Universal CRT as well as either + // the Windows 10 SDK or Windows 8.1 SDK. + fn find_msvc_latest(tool: &str, target: &str, ver: &str) -> Option { + let vcdir = otry!(get_vc_dir(ver)); + let mut tool = otry!(get_tool(tool, &vcdir, target)); + let sub = otry!(lib_subdir(target)); + let (ucrt, ucrt_version) = otry!(get_ucrt_dir()); + + let ucrt_include = ucrt.join("include").join(&ucrt_version); + tool.include.push(ucrt_include.join("ucrt")); + + let ucrt_lib = ucrt.join("lib").join(&ucrt_version); + tool.libs.push(ucrt_lib.join("ucrt").join(sub)); + + if let Some((sdk, version)) = get_sdk10_dir() { + tool.path.push(sdk.join("bin").join(sub)); + let sdk_lib = sdk.join("lib").join(&version); + tool.libs.push(sdk_lib.join("um").join(sub)); + let sdk_include = sdk.join("include").join(&version); + tool.include.push(sdk_include.join("um")); + tool.include.push(sdk_include.join("winrt")); + tool.include.push(sdk_include.join("shared")); + } else if let Some(sdk) = get_sdk81_dir() { + tool.path.push(sdk.join("bin").join(sub)); + let sdk_lib = sdk.join("lib").join("winv6.3"); + tool.libs.push(sdk_lib.join("um").join(sub)); + let 
sdk_include = sdk.join("include"); + tool.include.push(sdk_include.join("um")); + tool.include.push(sdk_include.join("winrt")); + tool.include.push(sdk_include.join("shared")); + } else { + return None; + } + Some(tool.into_tool()) + } + + // For MSVC 12 we need to find the Windows 8.1 SDK. + fn find_msvc_12(tool: &str, target: &str) -> Option { + let vcdir = otry!(get_vc_dir("12.0")); + let mut tool = otry!(get_tool(tool, &vcdir, target)); + let sub = otry!(lib_subdir(target)); + let sdk81 = otry!(get_sdk81_dir()); + tool.path.push(sdk81.join("bin").join(sub)); + let sdk_lib = sdk81.join("lib").join("winv6.3"); + tool.libs.push(sdk_lib.join("um").join(sub)); + let sdk_include = sdk81.join("include"); + tool.include.push(sdk_include.join("shared")); + tool.include.push(sdk_include.join("um")); + tool.include.push(sdk_include.join("winrt")); + Some(tool.into_tool()) + } + + // For MSVC 11 we need to find the Windows 8 SDK. + fn find_msvc_11(tool: &str, target: &str) -> Option { + let vcdir = otry!(get_vc_dir("11.0")); + let mut tool = otry!(get_tool(tool, &vcdir, target)); + let sub = otry!(lib_subdir(target)); + let sdk8 = otry!(get_sdk8_dir()); + tool.path.push(sdk8.join("bin").join(sub)); + let sdk_lib = sdk8.join("lib").join("win8"); + tool.libs.push(sdk_lib.join("um").join(sub)); + let sdk_include = sdk8.join("include"); + tool.include.push(sdk_include.join("shared")); + tool.include.push(sdk_include.join("um")); + tool.include.push(sdk_include.join("winrt")); + Some(tool.into_tool()) + } + + fn add_env(tool: &mut Tool, env: &str, paths: Vec) { + let prev = env::var_os(env).unwrap_or(OsString::new()); + let prev = env::split_paths(&prev); + let new = paths.into_iter().chain(prev); + tool.env.push((env.to_string().into(), env::join_paths(new).unwrap())); + } + + // Given a possible MSVC installation directory, we look for the linker and + // then add the MSVC library path. 
+ fn get_tool(tool: &str, path: &Path, target: &str) -> Option { + bin_subdir(target) + .into_iter() + .map(|(sub, host)| (path.join("bin").join(sub).join(tool), path.join("bin").join(host))) + .filter(|&(ref path, _)| path.is_file()) + .map(|(path, host)| { + let mut tool = MsvcTool::new(path); + tool.path.push(host); + tool + }) + .filter_map(|mut tool| { + let sub = otry!(vc_lib_subdir(target)); + tool.libs.push(path.join("lib").join(sub)); + tool.include.push(path.join("include")); + let atlmfc_path = path.join("atlmfc"); + if atlmfc_path.exists() { + tool.libs.push(atlmfc_path.join("lib").join(sub)); + tool.include.push(atlmfc_path.join("include")); + } + Some(tool) + }) + .next() + } + + // To find MSVC we look in a specific registry key for the version we are + // trying to find. + fn get_vc_dir(ver: &str) -> Option { + let key = r"SOFTWARE\Microsoft\VisualStudio\SxS\VC7"; + let key = otry!(LOCAL_MACHINE.open(key.as_ref()).ok()); + let path = otry!(key.query_str(ver).ok()); + Some(path.into()) + } + + // To find the Universal CRT we look in a specific registry key for where + // all the Universal CRTs are located and then sort them asciibetically to + // find the newest version. While this sort of sorting isn't ideal, it is + // what vcvars does so that's good enough for us. 
+ // + // Returns a pair of (root, version) for the ucrt dir if found + fn get_ucrt_dir() -> Option<(PathBuf, String)> { + let key = r"SOFTWARE\Microsoft\Windows Kits\Installed Roots"; + let key = otry!(LOCAL_MACHINE.open(key.as_ref()).ok()); + let root = otry!(key.query_str("KitsRoot10").ok()); + let readdir = otry!(Path::new(&root).join("lib").read_dir().ok()); + let max_libdir = otry!(readdir.filter_map(|dir| dir.ok()) + .map(|dir| dir.path()) + .filter(|dir| { + dir.components() + .last() + .and_then(|c| c.as_os_str().to_str()) + .map(|c| c.starts_with("10.") && dir.join("ucrt").is_dir()) + .unwrap_or(false) + }) + .max()); + let version = max_libdir.components().last().unwrap(); + let version = version.as_os_str().to_str().unwrap().to_string(); + Some((root.into(), version)) + } + + // Vcvars finds the correct version of the Windows 10 SDK by looking + // for the include `um\Windows.h` because sometimes a given version will + // only have UCRT bits without the rest of the SDK. Since we only care about + // libraries and not includes, we instead look for `um\x64\kernel32.lib`. + // Since the 32-bit and 64-bit libraries are always installed together we + // only need to bother checking x64, making this code a tiny bit simpler. + // Like we do for the Universal CRT, we sort the possibilities + // asciibetically to find the newest one as that is what vcvars does. 
+ fn get_sdk10_dir() -> Option<(PathBuf, String)> { + let key = r"SOFTWARE\Microsoft\Microsoft SDKs\Windows\v10.0"; + let key = otry!(LOCAL_MACHINE.open(key.as_ref()).ok()); + let root = otry!(key.query_str("InstallationFolder").ok()); + let readdir = otry!(Path::new(&root).join("lib").read_dir().ok()); + let mut dirs = readdir.filter_map(|dir| dir.ok()) + .map(|dir| dir.path()) + .collect::>(); + dirs.sort(); + let dir = otry!(dirs.into_iter() + .rev() + .filter(|dir| dir.join("um").join("x64").join("kernel32.lib").is_file()) + .next()); + let version = dir.components().last().unwrap(); + let version = version.as_os_str().to_str().unwrap().to_string(); + Some((root.into(), version)) + } + + // Interestingly there are several subdirectories, `win7` `win8` and + // `winv6.3`. Vcvars seems to only care about `winv6.3` though, so the same + // applies to us. Note that if we were targetting kernel mode drivers + // instead of user mode applications, we would care. + fn get_sdk81_dir() -> Option { + let key = r"SOFTWARE\Microsoft\Microsoft SDKs\Windows\v8.1"; + let key = otry!(LOCAL_MACHINE.open(key.as_ref()).ok()); + let root = otry!(key.query_str("InstallationFolder").ok()); + Some(root.into()) + } + + fn get_sdk8_dir() -> Option { + let key = r"SOFTWARE\Microsoft\Microsoft SDKs\Windows\v8.0"; + let key = otry!(LOCAL_MACHINE.open(key.as_ref()).ok()); + let root = otry!(key.query_str("InstallationFolder").ok()); + Some(root.into()) + } + + const PROCESSOR_ARCHITECTURE_INTEL: u16 = 0; + const PROCESSOR_ARCHITECTURE_AMD64: u16 = 9; + const X86: u16 = PROCESSOR_ARCHITECTURE_INTEL; + const X86_64: u16 = PROCESSOR_ARCHITECTURE_AMD64; + + // When choosing the tool to use, we have to choose the one which matches + // the target architecture. Otherwise we end up in situations where someone + // on 32-bit Windows is trying to cross compile to 64-bit and it tries to + // invoke the native 64-bit compiler which won't work. 
+ // + // For the return value of this function, the first member of the tuple is + // the folder of the tool we will be invoking, while the second member is + // the folder of the host toolchain for that tool which is essential when + // using a cross linker. We return a Vec since on x64 there are often two + // linkers that can target the architecture we desire. The 64-bit host + // linker is preferred, and hence first, due to 64-bit allowing it more + // address space to work with and potentially being faster. + fn bin_subdir(target: &str) -> Vec<(&'static str, &'static str)> { + let arch = target.split('-').next().unwrap(); + match (arch, host_arch()) { + ("i586", X86) | ("i686", X86) => vec![("", "")], + ("i586", X86_64) | ("i686", X86_64) => vec![("amd64_x86", "amd64"), ("", "")], + ("x86_64", X86) => vec![("x86_amd64", "")], + ("x86_64", X86_64) => vec![("amd64", "amd64"), ("x86_amd64", "")], + ("arm", X86) => vec![("x86_arm", "")], + ("arm", X86_64) => vec![("amd64_arm", "amd64"), ("x86_arm", "")], + _ => vec![], + } + } + + fn lib_subdir(target: &str) -> Option<&'static str> { + let arch = target.split('-').next().unwrap(); + match arch { + "i586" | "i686" => Some("x86"), + "x86_64" => Some("x64"), + "arm" => Some("arm"), + _ => None, + } + } + + // MSVC's x86 libraries are not in a subfolder + fn vc_lib_subdir(target: &str) -> Option<&'static str> { + let arch = target.split('-').next().unwrap(); + match arch { + "i586" | "i686" => Some(""), + "x86_64" => Some("amd64"), + "arm" => Some("arm"), + _ => None, + } + } + + #[allow(bad_style)] + fn host_arch() -> u16 { + type DWORD = u32; + type WORD = u16; + type LPVOID = *mut u8; + type DWORD_PTR = usize; + + #[repr(C)] + struct SYSTEM_INFO { + wProcessorArchitecture: WORD, + _wReserved: WORD, + _dwPageSize: DWORD, + _lpMinimumApplicationAddress: LPVOID, + _lpMaximumApplicationAddress: LPVOID, + _dwActiveProcessorMask: DWORD_PTR, + _dwNumberOfProcessors: DWORD, + _dwProcessorType: DWORD, + 
_dwAllocationGranularity: DWORD, + _wProcessorLevel: WORD, + _wProcessorRevision: WORD, + } + + extern "system" { + fn GetNativeSystemInfo(lpSystemInfo: *mut SYSTEM_INFO); + } + + unsafe { + let mut info = mem::zeroed(); + GetNativeSystemInfo(&mut info); + info.wProcessorArchitecture + } + } + + // Given a registry key, look at all the sub keys and find the one which has + // the maximal numeric value. + // + // Returns the name of the maximal key as well as the opened maximal key. + fn max_version(key: &RegistryKey) -> Option<(OsString, RegistryKey)> { + let mut max_vers = 0; + let mut max_key = None; + for subkey in key.iter().filter_map(|k| k.ok()) { + let val = subkey.to_str() + .and_then(|s| s.trim_left_matches("v").replace(".", "").parse().ok()); + let val = match val { + Some(s) => s, + None => continue, + }; + if val > max_vers { + if let Ok(k) = key.open(&subkey) { + max_vers = val; + max_key = Some((subkey, k)); + } + } + } + max_key + } + + // see http://stackoverflow.com/questions/328017/path-to-msbuild + fn find_msbuild(target: &str) -> Option { + let key = r"SOFTWARE\Microsoft\MSBuild\ToolsVersions"; + LOCAL_MACHINE.open(key.as_ref()) + .ok() + .and_then(|key| { + max_version(&key).and_then(|(_vers, key)| key.query_str("MSBuildToolsPath").ok()) + }) + .map(|path| { + let mut path = PathBuf::from(path); + path.push("MSBuild.exe"); + let mut tool = Tool::new(path); + if target.contains("x86_64") { + tool.env.push(("Platform".into(), "X64".into())); + } + tool + }) + } +} diff -Nru cargo-0.17.0/vendor/gcc-0.3.45/tests/cc_env.rs cargo-0.19.0/vendor/gcc-0.3.45/tests/cc_env.rs --- cargo-0.17.0/vendor/gcc-0.3.45/tests/cc_env.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/gcc-0.3.45/tests/cc_env.rs 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,49 @@ +extern crate tempdir; +extern crate gcc; + +use std::env; + +mod support; +use support::Test; + +#[test] +fn main() { + ccache(); + distcc(); + ccache_spaces(); +} + +fn ccache() { + let test = 
Test::gnu(); + test.shim("ccache"); + + env::set_var("CC", "ccache lol-this-is-not-a-compiler foo"); + test.gcc().file("foo.c").compile("libfoo.a"); + + test.cmd(0) + .must_have("lol-this-is-not-a-compiler foo") + .must_have("foo.c") + .must_not_have("ccache"); +} + +fn ccache_spaces() { + let test = Test::gnu(); + test.shim("ccache"); + + env::set_var("CC", "ccache lol-this-is-not-a-compiler foo"); + test.gcc().file("foo.c").compile("libfoo.a"); + test.cmd(0).must_have("lol-this-is-not-a-compiler foo"); +} + +fn distcc() { + let test = Test::gnu(); + test.shim("distcc"); + + env::set_var("CC", "distcc lol-this-is-not-a-compiler foo"); + test.gcc().file("foo.c").compile("libfoo.a"); + + test.cmd(0) + .must_have("lol-this-is-not-a-compiler foo") + .must_have("foo.c") + .must_not_have("distcc"); +} diff -Nru cargo-0.17.0/vendor/gcc-0.3.45/tests/support/mod.rs cargo-0.19.0/vendor/gcc-0.3.45/tests/support/mod.rs --- cargo-0.17.0/vendor/gcc-0.3.45/tests/support/mod.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/gcc-0.3.45/tests/support/mod.rs 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,112 @@ +#![allow(dead_code)] + +use std::env; +use std::ffi::OsStr; +use std::fs::{self, File}; +use std::io::prelude::*; +use std::path::PathBuf; + +use gcc; +use tempdir::TempDir; + +pub struct Test { + pub td: TempDir, + pub gcc: PathBuf, + pub msvc: bool, +} + +pub struct Execution { + args: Vec, +} + +impl Test { + pub fn new() -> Test { + let mut gcc = PathBuf::from(env::current_exe().unwrap()); + gcc.pop(); + if gcc.ends_with("deps") { + gcc.pop(); + } + gcc.push(format!("gcc-shim{}", env::consts::EXE_SUFFIX)); + Test { + td: TempDir::new("gcc-test").unwrap(), + gcc: gcc, + msvc: false, + } + } + + pub fn gnu() -> Test { + let t = Test::new(); + t.shim("cc").shim("ar"); + t + } + + pub fn msvc() -> Test { + let mut t = Test::new(); + t.shim("cl").shim("lib.exe"); + t.msvc = true; + t + } + + pub fn shim(&self, name: &str) -> &Test { + let fname = format!("{}{}", 
name, env::consts::EXE_SUFFIX); + fs::hard_link(&self.gcc, self.td.path().join(&fname)) + .or_else(|_| fs::copy(&self.gcc, self.td.path().join(&fname)).map(|_| ())) + .unwrap(); + self + } + + pub fn gcc(&self) -> gcc::Config { + let mut cfg = gcc::Config::new(); + let mut path = env::split_paths(&env::var_os("PATH").unwrap()).collect::>(); + path.insert(0, self.td.path().to_owned()); + let target = if self.msvc { + "x86_64-pc-windows-msvc" + } else { + "x86_64-unknown-linux-gnu" + }; + + cfg.target(target) + .host(target) + .opt_level(2) + .debug(false) + .out_dir(self.td.path()) + .__set_env("PATH", env::join_paths(path).unwrap()) + .__set_env("GCCTEST_OUT_DIR", self.td.path()); + if self.msvc { + cfg.compiler(self.td.path().join("cl")); + cfg.archiver(self.td.path().join("lib.exe")); + } + cfg + } + + pub fn cmd(&self, i: u32) -> Execution { + let mut s = String::new(); + File::open(self.td.path().join(format!("out{}", i))) + .unwrap() + .read_to_string(&mut s) + .unwrap(); + Execution { args: s.lines().map(|s| s.to_string()).collect() } + } +} + +impl Execution { + pub fn must_have>(&self, p: P) -> &Execution { + if !self.has(p.as_ref()) { + panic!("didn't find {:?} in {:?}", p.as_ref(), self.args); + } else { + self + } + } + + pub fn must_not_have>(&self, p: P) -> &Execution { + if self.has(p.as_ref()) { + panic!("found {:?}", p.as_ref()); + } else { + self + } + } + + pub fn has(&self, p: &OsStr) -> bool { + self.args.iter().any(|arg| OsStr::new(arg) == p) + } +} diff -Nru cargo-0.17.0/vendor/gcc-0.3.45/tests/test.rs cargo-0.19.0/vendor/gcc-0.3.45/tests/test.rs --- cargo-0.17.0/vendor/gcc-0.3.45/tests/test.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/gcc-0.3.45/tests/test.rs 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,229 @@ +extern crate gcc; +extern crate tempdir; + +use support::Test; + +mod support; + +#[test] +fn gnu_smoke() { + let test = Test::gnu(); + test.gcc() + .file("foo.c") + .compile("libfoo.a"); + + test.cmd(0) + 
.must_have("-O2") + .must_have("foo.c") + .must_not_have("-g") + .must_have("-c") + .must_have("-ffunction-sections") + .must_have("-fdata-sections"); + test.cmd(1).must_have(test.td.path().join("foo.o")); +} + +#[test] +fn gnu_opt_level_1() { + let test = Test::gnu(); + test.gcc() + .opt_level(1) + .file("foo.c") + .compile("libfoo.a"); + + test.cmd(0) + .must_have("-O1") + .must_not_have("-O2"); +} + +#[test] +fn gnu_opt_level_s() { + let test = Test::gnu(); + test.gcc() + .opt_level_str("s") + .file("foo.c") + .compile("libfoo.a"); + + test.cmd(0) + .must_have("-Os") + .must_not_have("-O1") + .must_not_have("-O2") + .must_not_have("-O3") + .must_not_have("-Oz"); +} + +#[test] +fn gnu_debug() { + let test = Test::gnu(); + test.gcc() + .debug(true) + .file("foo.c") + .compile("libfoo.a"); + test.cmd(0).must_have("-g"); +} + +#[test] +fn gnu_x86_64() { + for vendor in &["unknown-linux-gnu", "apple-darwin"] { + let target = format!("x86_64-{}", vendor); + let test = Test::gnu(); + test.gcc() + .target(&target) + .host(&target) + .file("foo.c") + .compile("libfoo.a"); + + test.cmd(0) + .must_have("-fPIC") + .must_have("-m64"); + } +} + +#[test] +fn gnu_x86_64_no_pic() { + for vendor in &["unknown-linux-gnu", "apple-darwin"] { + let target = format!("x86_64-{}", vendor); + let test = Test::gnu(); + test.gcc() + .pic(false) + .target(&target) + .host(&target) + .file("foo.c") + .compile("libfoo.a"); + + test.cmd(0).must_not_have("-fPIC"); + } +} + +#[test] +fn gnu_i686() { + for vendor in &["unknown-linux-gnu", "apple-darwin"] { + let target = format!("i686-{}", vendor); + let test = Test::gnu(); + test.gcc() + .target(&target) + .host(&target) + .file("foo.c") + .compile("libfoo.a"); + + test.cmd(0) + .must_have("-m32"); + } +} + +#[test] +fn gnu_i686_pic() { + for vendor in &["unknown-linux-gnu", "apple-darwin"] { + let target = format!("i686-{}", vendor); + let test = Test::gnu(); + test.gcc() + .pic(true) + .target(&target) + .host(&target) + .file("foo.c") + 
.compile("libfoo.a"); + + test.cmd(0).must_have("-fPIC"); + } +} + +#[test] +fn gnu_set_stdlib() { + let test = Test::gnu(); + test.gcc() + .cpp_set_stdlib(Some("foo")) + .file("foo.c") + .compile("libfoo.a"); + + test.cmd(0).must_not_have("-stdlib=foo"); +} + +#[test] +fn gnu_include() { + let test = Test::gnu(); + test.gcc() + .include("foo/bar") + .file("foo.c") + .compile("libfoo.a"); + + test.cmd(0).must_have("-I").must_have("foo/bar"); +} + +#[test] +fn gnu_define() { + let test = Test::gnu(); + test.gcc() + .define("FOO", Some("bar")) + .define("BAR", None) + .file("foo.c") + .compile("libfoo.a"); + + test.cmd(0).must_have("-DFOO=bar").must_have("-DBAR"); +} + +#[test] +fn gnu_compile_assembly() { + let test = Test::gnu(); + test.gcc() + .file("foo.S") + .compile("libfoo.a"); + test.cmd(0).must_have("foo.S"); +} + +#[test] +fn msvc_smoke() { + let test = Test::msvc(); + test.gcc() + .file("foo.c") + .compile("libfoo.a"); + + test.cmd(0) + .must_have("/O2") + .must_have("foo.c") + .must_not_have("/Z7") + .must_have("/c"); + test.cmd(1).must_have(test.td.path().join("foo.o")); +} + +#[test] +fn msvc_opt_level_0() { + let test = Test::msvc(); + test.gcc() + .opt_level(0) + .file("foo.c") + .compile("libfoo.a"); + + test.cmd(0).must_not_have("/O2"); +} + +#[test] +fn msvc_debug() { + let test = Test::msvc(); + test.gcc() + .debug(true) + .file("foo.c") + .compile("libfoo.a"); + test.cmd(0).must_have("/Z7"); +} + +#[test] +fn msvc_include() { + let test = Test::msvc(); + test.gcc() + .include("foo/bar") + .file("foo.c") + .compile("libfoo.a"); + + test.cmd(0).must_have("/I").must_have("foo/bar"); +} + +#[test] +fn msvc_define() { + let test = Test::msvc(); + test.gcc() + .define("FOO", Some("bar")) + .define("BAR", None) + .file("foo.c") + .compile("libfoo.a"); + + test.cmd(0).must_have("/DFOO=bar").must_have("/DBAR"); +} diff -Nru cargo-0.17.0/vendor/gcc-0.3.45/.travis.yml cargo-0.19.0/vendor/gcc-0.3.45/.travis.yml --- cargo-0.17.0/vendor/gcc-0.3.45/.travis.yml 
1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/gcc-0.3.45/.travis.yml 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,49 @@ +language: rust +rust: + - stable + - beta + - nightly +matrix: + include: + # Minimum version supported + - rust: 1.6.0 + install: + script: cargo build + +sudo: false +install: + - if [ "$TRAVIS_OS_NAME" = "linux" ]; then OS=unknown-linux-gnu; else OS=apple-darwin; fi + - export TARGET=$ARCH-$OS + - curl https://static.rust-lang.org/rustup.sh | + sh -s -- --add-target=$TARGET --disable-sudo -y --prefix=`rustc --print sysroot` +before_script: + - pip install 'travis-cargo<0.2' --user && export PATH=$HOME/.local/bin:$PATH +script: + - cargo build --verbose + - cargo test --verbose + - cargo test --verbose --features parallel + - cargo test --manifest-path gcc-test/Cargo.toml --target $TARGET + - cargo test --manifest-path gcc-test/Cargo.toml --target $TARGET --features parallel + - cargo test --manifest-path gcc-test/Cargo.toml --target $TARGET --release + - cargo doc + - cargo clean && cargo build + - rustdoc --test README.md -L target/debug -L target/debug/deps +after_success: + - travis-cargo --only nightly doc-upload +env: + global: + secure: "CBtqrudgE0PS8x3kTr44jKbC2D4nfnmdYVecooNm0qnER4B4TSvZpZSQoCgKK6k4BYQuOSyFTOwYx6M79w39ZMOgyCP9ytB+tyMWL0/+ZuUQL04yVg4M5vd3oJMkOaXbvG56ncgPyFrseY+FPDg+mXAzvJk/nily37YXjkQj2D0=" + + matrix: + - ARCH=x86_64 + - ARCH=i686 +notifications: + email: + on_success: never +os: + - linux + - osx +addons: + apt: + packages: + - g++-multilib diff -Nru cargo-0.17.0/vendor/git2-0.6.3/appveyor.yml cargo-0.19.0/vendor/git2-0.6.3/appveyor.yml --- cargo-0.17.0/vendor/git2-0.6.3/appveyor.yml 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/git2-0.6.3/appveyor.yml 1970-01-01 00:00:00.000000000 +0000 @@ -1,23 +0,0 @@ -environment: - matrix: - - TARGET: x86_64-pc-windows-gnu - MSYS_BITS: 64 - - TARGET: i686-pc-windows-gnu - MSYS_BITS: 32 - - TARGET: x86_64-pc-windows-msvc - - TARGET: 
i686-pc-windows-msvc -install: - - ps: Start-FileDownload "https://static.rust-lang.org/dist/rust-nightly-${env:TARGET}.exe" - - rust-nightly-%TARGET%.exe /VERYSILENT /NORESTART /DIR="C:\Program Files (x86)\Rust" - - set PATH=%PATH%;C:\Program Files (x86)\Rust\bin - - if defined MSYS_BITS set PATH=C:\msys64\mingw%MSYS_BITS%\bin;C:\msys64\usr\bin;%PATH% - - set CARGO_TARGET_DIR=%APPVEYOR_BUILD_FOLDER%\target - - rustc -V - - cargo -V - -build: false - -test_script: - - cargo test --target %TARGET% - - cargo test --no-default-features --target %TARGET% - - cargo run --manifest-path systest/Cargo.toml --target %TARGET% diff -Nru cargo-0.17.0/vendor/git2-0.6.3/.cargo-checksum.json cargo-0.19.0/vendor/git2-0.6.3/.cargo-checksum.json --- cargo-0.17.0/vendor/git2-0.6.3/.cargo-checksum.json 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/git2-0.6.3/.cargo-checksum.json 1970-01-01 00:00:00.000000000 +0000 @@ -1 +0,0 @@ -{"files":{".cargo-ok":"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",".gitignore":"f990d3cc59423f4ac490adef9eb87a783f255505bf27ea8e88cd00f89fc46a56",".gitmodules":"768f0798b18f77ffaf1ed319c765a12894c838193490c76478b1cda14cfd0893",".travis.yml":"98644ce2fff201b5f0337eaccc6a0ddb536e5d8acf8ed962e5a008a647756308","Cargo.toml":"40fbbbcc53de75692b0de1e83500d397c2d77cc11268c09605e6b76ee001f52b","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"378f5840b258e2779c39418f3f2d7b2ba96f1c7917dd6be0713f88305dbda397","README.md":"eea41ed3dd92e3039c9851709cc370150a287284872cd391294a1a63b95f9bbf","appveyor.yml":"b381895d3a8863f2c0926efdb66f40e9a84cd8811337d78a7e5cd64e1a61c201","examples/add.rs":"7c602f1122bbf107d7ffd56ddf29ff2d14e77c4adf7f739cd2dd9e73ada10513","examples/blame.rs":"841558268e9cb3847bea87640c328356f908d1c8faa3fa10d78d659e59f32db3","examples/cat-file.rs":"6685899cef3e2414e121005e24125a0e8f250e4c073d2228fa05394e9a9e2f7a","examples/clone.rs":"e9da03f5deac59edaf6ae894b7cb9a7e5b33acb3e43
b7d730d79483eb921930b","examples/diff.rs":"35a2268af23e8a299865ff7895d98227ad86c65132730eb1e3eb110888c3d85b","examples/fetch.rs":"03dd1a64a79f58675092d6c9bd9620e3cf0e43e5dce5688800bb4fc262201718","examples/init.rs":"f409d9f1145e77a39eeacf3008ee8b4ac9fe7542a291cd40b423d26fe43a34b7","examples/log.rs":"9e3c01117bc7359dc628ccb94148c959e78c44a7bbf0e8f32cfa67cc38b3cda0","examples/ls-remote.rs":"3cc68143edcc57490ecb310eb7101713a7327b4a18a467d620ef38cf31c9e71c","examples/rev-list.rs":"ce6060a45a1c81466e3ae9f34c45f14402bdb1022ef3ad502fd2036abbc7dfe5","examples/rev-parse.rs":"6ec6091f2fd9d8f4f474466d0b4bbaf356dcc804dc9889a0d31826c7c16b08d7","examples/status.rs":"953ec422081bd04a8254435f034fe8f87e1974090f08fd1a000e4e0107821544","examples/tag.rs":"07d72b4fcbb0997c0f1d96020b914d87dae4e066302b0efd244a7752cc76260e","src/blame.rs":"474cf57e34a90d2f5e190131e18d8793ea6b673b6e833a64240f8a0f80aeebea","src/blob.rs":"762ce3196d067f01be4bf5d39027d16d885c0e2735670fc062750779d12445cc","src/branch.rs":"9c2a9e93e7c6d8a98e2018cdec82396ae4dbfbb1918029e04c1ac889cf884847","src/buf.rs":"ff2ec90d03cbe76a61a1a8c913126b151c5fbaba4dc972ad378ecd630e644b6d","src/build.rs":"e194d766e84e04fae511b978ee44617bae6bba2d376d47072f04c25d467cc4f5","src/call.rs":"cae278421b3b0c3dae2085f01294f6834eac125ecc0f2e44397a9c073a00284b","src/cert.rs":"b90593113ec89c71967ae6b3aea750690c6fa936ea0d8ebb7cb506009c360af7","src/commit.rs":"3a8394d503f44dc65b52ae3911d1ea9c5b2c2c279fd4af158222658f531ec486","src/config.rs":"ccf1a08187b3ea1fe67e05e6d627ba5b27e4a2bd90056ca5bf254b514639550c","src/cred.rs":"1e4a5ccca602789c17d9bfd3dbde26aca6a561519daf36a1279785dd2a5cceda","src/describe.rs":"0210d92c3cf4626e6df0a290549cbf970acd714032c976fa320166e64f80f6c2","src/diff.rs":"f4db87ed443256e98cb8d9646747831acffe2e15141795df0da33a1bf23d6a95","src/error.rs":"560ae0f41be03070fac4de0dce8a3d9a1d16047fdd6c054d6fe64a634406e2ca","src/index.rs":"5ae43578c88d35f0532a70de5ad23ca4d13a423bcebdfe06b879a9bf4874694d","src/lib.rs":"87d6ee037943ae4b9d157382674
282bb88b9347f4fde8d86f9c1998d10f51fbf","src/merge.rs":"ed6986a77d1ae8793d9c6a8df052f964f514a0205abdf8dc4d0af24bf5908532","src/message.rs":"b10d9ff203702beb97a48cdfb9a0d19f650ef4427c006bd175d72f7f24f0d9ba","src/note.rs":"6cb91b5ae8c58c4d3a46fccc55ceaf8f10a522e2f2c88c19575e7314029359b2","src/object.rs":"3cf6dd0fd75bb0070987812419a847bb19ae68c13fcd5483056450cc754c770d","src/oid.rs":"21a94d1d3c5f3bb5abb10f74cea206186660b05eafd4957a21f7cbbf89cee933","src/oid_array.rs":"a1d43026adfdf01e12fcee03462db41372a2eec08dc710855775e3a2b2b33117","src/packbuilder.rs":"2f4ff842412bdf7a545ba4361cb8e88181a677da214f207f0bb39eb937916b56","src/panic.rs":"d8a6e8899f95ad1fa041278776fe3a693689d2e7682058398868e8403baaa960","src/patch.rs":"5ede15ddedea25a408ec4f74c291ab971ce5a826927794c569173f68a57c0222","src/pathspec.rs":"adbef0a6d9f9415790d1b578ff0f71394acab18b594f7bef3910b855591fe059","src/proxy_options.rs":"6ff8717ce32d8536afee0da47cc19a4ee0df3b88c6b4736547279b699deac0ca","src/reference.rs":"d0f1e43c6164f1d8e5fd6dbdc25df5212a9ed7c0b674550ab80c3f248da578bc","src/reflog.rs":"fe88541c6297f10d8aae4fbcddcc63a9f329b6ff277d342dc14b667c01c5c69e","src/refspec.rs":"dce85c33987bb9aa23af09b782bda0e82b6cc585727bfe059780460c9993ac94","src/remote.rs":"b605cf0ac284de5493d6b09f62ea12ffb0965c25f062d22d687cea697fdb04d6","src/remote_callbacks.rs":"2bd906d0298c687f50fa4fee308ce294bcf48124ac8f73d09895b6a60e6d7b67","src/repo.rs":"a547362738dbe1cdc384fbba9a7c6617e0ea21de710c620c01986cab9b06a3cb","src/revspec.rs":"b68ee5df102191defc73eabdd948bc2681f9b900047a309fc208d640e5c493ad","src/revwalk.rs":"b04701b8c91b172aef1bc1c1592252998ac94d3be893364d0513aad453c05d68","src/signature.rs":"7671512cad49e164fb2763320706e72d31a7b25b7e200037100048bb555cb13b","src/status.rs":"ff74b77800e2b6d7d7a67f796753367500d24df27d6c0652813b554e42a0f6a2","src/string_array.rs":"ea164c415ee9cf7f654c0bbb29f58b22e9cc10d3a58a59156a9b50a8a027c27b","src/submodule.rs":"b3afe5e3fce3aa87ab328375a5730e7e86c88e1f40f0a274e5c1d8b530348a40","src/tag.rs":"17
c0804e167ef56643bb729a12722ead5b72e55d83578e0130b0647673126d9c","src/test.rs":"5d251bdaea22b6d0ebdb992a169ec8671e292d75ac5f3876a104b65d3abb508c","src/time.rs":"3ee4c625da74e1ae0e1aa9488ebf666009736fcc01a1b61287f851906827d947","src/transport.rs":"13a1e0c3d5d774e1ef6200f93e5face4d3c9c0397813fc5e8aa30aa4bf6f5bd2","src/tree.rs":"8c0560204edf4f195f4a71cf6f1390169c8ebeaf742d589eabfe58e40c1651fe","src/treebuilder.rs":"fb60deaf889d9a7c98db5056f21d27820ebdeff6826bc227a79b44488dcda7b3","src/util.rs":"16c04cd1fda5d5280f55e3ed71f687c3367b7e90e508c5a5e277204eb11a7357"},"package":"0534ca86640c6a3a0687cc6bee9ec4032509a0d112d97e8241fa6b7e075f6119"} \ No newline at end of file diff -Nru cargo-0.17.0/vendor/git2-0.6.3/Cargo.toml cargo-0.19.0/vendor/git2-0.6.3/Cargo.toml --- cargo-0.17.0/vendor/git2-0.6.3/Cargo.toml 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/git2-0.6.3/Cargo.toml 1970-01-01 00:00:00.000000000 +0000 @@ -1,42 +0,0 @@ -[package] - -name = "git2" -version = "0.6.3" -authors = ["Alex Crichton "] -license = "MIT/Apache-2.0" -readme = "README.md" -keywords = ["git"] -repository = "https://github.com/alexcrichton/git2-rs" -homepage = "https://github.com/alexcrichton/git2-rs" -documentation = "http://alexcrichton.com/git2-rs" -description = """ -Bindings to libgit2 for interoperating with git repositories. This library is -both threadsafe and memory safe and allows both reading and writing git -repositories. 
-""" - -[dependencies] -url = "1.0" -bitflags = "0.7" -libc = "0.2" -libgit2-sys = { path = "libgit2-sys", version = "0.6.4" } - -[target."cfg(all(unix, not(target_os = \"macos\")))".dependencies] -openssl-sys = { version = "0.9.0", optional = true } -openssl-probe = { version = "0.1", optional = true } - -[dev-dependencies] -docopt = "0.6" -rustc-serialize = "0.3" -time = "0.1" -tempdir = "0.3" - -[features] -unstable = [] -default = ["ssh", "https", "curl"] -ssh = ["libgit2-sys/ssh"] -https = ["libgit2-sys/https", "openssl-sys", "openssl-probe"] -curl = ["libgit2-sys/curl"] - -[workspace] -members = ["systest", "git2-curl"] diff -Nru cargo-0.17.0/vendor/git2-0.6.3/examples/add.rs cargo-0.19.0/vendor/git2-0.6.3/examples/add.rs --- cargo-0.17.0/vendor/git2-0.6.3/examples/add.rs 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/git2-0.6.3/examples/add.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,84 +0,0 @@ -/* - * libgit2 "add" example - shows how to modify the index - * - * Written by the libgit2 contributors - * - * To the extent possible under law, the author(s) have dedicated all copyright - * and related and neighboring rights to this software to the public domain - * worldwide. This software is distributed without any warranty. - * - * You should have received a copy of the CC0 Public Domain Dedication along - * with this software. If not, see - * . 
- */ - -#![deny(warnings)] -#![allow(trivial_casts)] - -extern crate git2; -extern crate docopt; -extern crate rustc_serialize; - -use std::path::Path; -use docopt::Docopt; -use git2::Repository; - -#[derive(RustcDecodable)] -struct Args { - arg_spec: Vec, - flag_dry_run: bool, - flag_verbose: bool, - flag_update: bool, -} - -fn run(args: &Args) -> Result<(), git2::Error> { - let repo = try!(Repository::open(&Path::new("."))); - let mut index = try!(repo.index()); - - let cb = &mut |path: &Path, _matched_spec: &[u8]| -> i32 { - let status = repo.status_file(path).unwrap(); - - let ret = if status.contains(git2::STATUS_WT_MODIFIED) || - status.contains(git2::STATUS_WT_NEW) { - println!("add '{}'", path.display()); - 0 - } else { - 1 - }; - - if args.flag_dry_run {1} else {ret} - }; - let cb = if args.flag_verbose || args.flag_update { - Some(cb as &mut git2::IndexMatchedPath) - } else { - None - }; - - if args.flag_update { - try!(index.update_all(args.arg_spec.iter(), cb)); - } else { - try!(index.add_all(args.arg_spec.iter(), git2::ADD_DEFAULT, cb)); - } - - try!(index.write()); - Ok(()) -} - -fn main() { - const USAGE: &'static str = " -usage: add [options] [--] [..] 
- -Options: - -n, --dry-run dry run - -v, --verbose be verbose - -u, --update update tracked files - -h, --help show this message -"; - - let args = Docopt::new(USAGE).and_then(|d| d.decode()) - .unwrap_or_else(|e| e.exit()); - match run(&args) { - Ok(()) => {} - Err(e) => println!("error: {}", e), - } -} diff -Nru cargo-0.17.0/vendor/git2-0.6.3/examples/blame.rs cargo-0.19.0/vendor/git2-0.6.3/examples/blame.rs --- cargo-0.17.0/vendor/git2-0.6.3/examples/blame.rs 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/git2-0.6.3/examples/blame.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,105 +0,0 @@ -/* - * libgit2 "blame" example - shows how to use the blame API - * - * Written by the libgit2 contributors - * - * To the extent possible under law, the author(s) have dedicated all copyright - * and related and neighboring rights to this software to the public domain - * worldwide. This software is distributed without any warranty. - * - * You should have received a copy of the CC0 Public Domain Dedication along - * with this software. If not, see - * . 
- */ - -extern crate git2; -extern crate docopt; -extern crate rustc_serialize; - -use docopt::Docopt; -use git2::{Repository, BlameOptions}; -use std::path::Path; -use std::io::{BufReader, BufRead}; - -#[derive(RustcDecodable)] #[allow(non_snake_case)] -struct Args { - arg_path: String, - arg_spec: Option, - flag_M: bool, - flag_C: bool, - flag_F: bool, -} - -fn run(args: &Args) -> Result<(), git2::Error> { - let repo = try!(Repository::open(".")); - let path = Path::new(&args.arg_path[..]); - - // Prepare our blame options - let mut opts = BlameOptions::new(); - opts.track_copies_same_commit_moves(args.flag_M) - .track_copies_same_commit_copies(args.flag_C) - .first_parent(args.flag_F); - - let mut commit_id = "HEAD".to_string(); - - // Parse spec - if let Some(spec) = args.arg_spec.as_ref() { - - let revspec = try!(repo.revparse(spec)); - - let (oldest, newest) = if revspec.mode().contains(git2::REVPARSE_SINGLE) { - (None, revspec.from()) - } else if revspec.mode().contains(git2::REVPARSE_RANGE) { - (revspec.from(), revspec.to()) - } else { - (None, None) - }; - - if let Some(commit) = oldest { - opts.oldest_commit(commit.id()); - } - - if let Some(commit) = newest { - opts.newest_commit(commit.id()); - if !commit.id().is_zero() { - commit_id = format!("{}", commit.id()) - } - } - - } - - let spec = format!("{}:{}", commit_id, path.display()); - let blame = try!(repo.blame_file(path, Some(&mut opts))); - let object = try!(repo.revparse_single(&spec[..])); - let blob = try!(repo.find_blob(object.id())); - let reader = BufReader::new(blob.content()); - - for (i, line) in reader.lines().enumerate() { - if let (Ok(line), Some(hunk)) = (line, blame.get_line(i+1)) { - let sig = hunk.final_signature(); - println!("{} {} <{}> {}", hunk.final_commit_id(), - String::from_utf8_lossy(sig.name_bytes()), - String::from_utf8_lossy(sig.email_bytes()), line); - } - } - - Ok(()) -} - -fn main() { - const USAGE: &'static str = " -usage: blame [options] [] - -Options: - -M find 
line moves within and across files - -C find line copies within and across files - -F follow only the first parent commits -"; - - let args = Docopt::new(USAGE).and_then(|d| d.decode()) - .unwrap_or_else(|e| e.exit()); - match run(&args) { - Ok(()) => {} - Err(e) => println!("error: {}", e), - } -} diff -Nru cargo-0.17.0/vendor/git2-0.6.3/examples/cat-file.rs cargo-0.19.0/vendor/git2-0.6.3/examples/cat-file.rs --- cargo-0.17.0/vendor/git2-0.6.3/examples/cat-file.rs 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/git2-0.6.3/examples/cat-file.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,143 +0,0 @@ -/* - * libgit2 "cat-file" example - shows how to print data from the ODB - * - * Written by the libgit2 contributors - * - * To the extent possible under law, the author(s) have dedicated all copyright - * and related and neighboring rights to this software to the public domain - * worldwide. This software is distributed without any warranty. - * - * You should have received a copy of the CC0 Public Domain Dedication along - * with this software. If not, see - * . - */ - -#![deny(warnings)] - -extern crate git2; -extern crate docopt; -extern crate rustc_serialize; - -use std::io::{self, Write}; - -use docopt::Docopt; -use git2::{Repository, ObjectType, Blob, Commit, Signature, Tag, Tree}; - -#[derive(RustcDecodable)] -struct Args { - arg_object: String, - flag_t: bool, - flag_s: bool, - flag_e: bool, - flag_p: bool, - flag_q: bool, - flag_v: bool, - flag_git_dir: Option, -} - -fn run(args: &Args) -> Result<(), git2::Error> { - let path = args.flag_git_dir.as_ref().map(|s| &s[..]).unwrap_or("."); - let repo = try!(Repository::open(path)); - - let obj = try!(repo.revparse_single(&args.arg_object)); - if args.flag_v && !args.flag_q { - println!("{} {}\n--", obj.kind().unwrap().str(), obj.id()); - } - - if args.flag_t { - println!("{}", obj.kind().unwrap().str()); - } else if args.flag_s { - /* ... */ - } else if args.flag_e { - /* ... 
*/ - } else if args.flag_p { - match obj.kind() { - Some(ObjectType::Blob) => { - show_blob(obj.as_blob().unwrap()); - } - Some(ObjectType::Commit) => { - show_commit(obj.as_commit().unwrap()); - } - Some(ObjectType::Tag) => { - show_tag(obj.as_tag().unwrap()); - } - Some(ObjectType::Tree) => { - show_tree(obj.as_tree().unwrap()); - } - Some(ObjectType::Any) | None => { - println!("unknown {}", obj.id()) - } - } - } - Ok(()) -} - -fn show_blob(blob: &Blob) { - io::stdout().write_all(blob.content()).unwrap(); -} - -fn show_commit(commit: &Commit) { - println!("tree {}", commit.tree_id()); - for parent in commit.parent_ids() { - println!("parent {}", parent); - } - show_sig("author", Some(commit.author())); - show_sig("committer", Some(commit.committer())); - if let Some(msg) = commit.message() { - println!("\n{}", msg); - } -} - -fn show_tag(tag: &Tag) { - println!("object {}", tag.target_id()); - println!("type {}", tag.target_type().unwrap().str()); - println!("tag {}", tag.name().unwrap()); - show_sig("tagger", tag.tagger()); - - if let Some(msg) = tag.message() { - println!("\n{}", msg); - } -} - -fn show_tree(tree: &Tree) { - for entry in tree.iter() { - println!("{:06o} {} {}\t{}", - entry.filemode(), - entry.kind().unwrap().str(), - entry.id(), - entry.name().unwrap()); - } -} - -fn show_sig(header: &str, sig: Option) { - let sig = match sig { Some(s) => s, None => return }; - let offset = sig.when().offset_minutes(); - let (sign, offset) = if offset < 0 {('-', -offset)} else {('+', offset)}; - let (hours, minutes) = (offset / 60, offset % 60); - println!("{} {} {} {}{:02}{:02}", - header, sig, sig.when().seconds(), sign, hours, minutes); - -} - -fn main() { - const USAGE: &'static str = " -usage: cat-file (-t | -s | -e | -p) [options] - -Options: - -t show the object type - -s show the object size - -e suppress all output - -p pretty print the contents of the object - -q suppress output - -v use verbose output - --git-dir use the specified directory as the 
base directory - -h, --help show this message -"; - - let args = Docopt::new(USAGE).and_then(|d| d.decode()) - .unwrap_or_else(|e| e.exit()); - match run(&args) { - Ok(()) => {} - Err(e) => println!("error: {}", e), - } -} diff -Nru cargo-0.17.0/vendor/git2-0.6.3/examples/clone.rs cargo-0.19.0/vendor/git2-0.6.3/examples/clone.rs --- cargo-0.17.0/vendor/git2-0.6.3/examples/clone.rs 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/git2-0.6.3/examples/clone.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,121 +0,0 @@ -/* - * libgit2 "clone" example - * - * Written by the libgit2 contributors - * - * To the extent possible under law, the author(s) have dedicated all copyright - * and related and neighboring rights to this software to the public domain - * worldwide. This software is distributed without any warranty. - * - * You should have received a copy of the CC0 Public Domain Dedication along - * with this software. If not, see - * . - */ - -#![deny(warnings)] - -extern crate git2; -extern crate docopt; -extern crate rustc_serialize; - -use docopt::Docopt; -use git2::build::{RepoBuilder, CheckoutBuilder}; -use git2::{RemoteCallbacks, Progress, FetchOptions}; -use std::cell::RefCell; -use std::io::{self, Write}; -use std::path::{Path, PathBuf}; - -#[derive(RustcDecodable)] -struct Args { - arg_url: String, - arg_path: String, -} - -struct State { - progress: Option>, - total: usize, - current: usize, - path: Option, - newline: bool, -} - -fn print(state: &mut State) { - let stats = state.progress.as_ref().unwrap(); - let network_pct = (100 * stats.received_objects()) / stats.total_objects(); - let index_pct = (100 * stats.indexed_objects()) / stats.total_objects(); - let co_pct = if state.total > 0 { - (100 * state.current) / state.total - } else { - 0 - }; - let kbytes = stats.received_bytes() / 1024; - if stats.received_objects() == stats.total_objects() && false { - if !state.newline { - println!(""); - state.newline = true; - } - print!("Resolving deltas 
{}/{}\r", stats.indexed_deltas(), - stats.total_deltas()); - } else { - print!("net {:3}% ({:4} kb, {:5}/{:5}) / idx {:3}% ({:5}/{:5}) \ - / chk {:3}% ({:4}/{:4}) {}\r", - network_pct, kbytes, stats.received_objects(), - stats.total_objects(), - index_pct, stats.indexed_objects(), stats.total_objects(), - co_pct, state.current, state.total, - state.path.as_ref().map(|s| s.to_string_lossy().into_owned()) - .unwrap_or(String::new())); - } - io::stdout().flush().unwrap(); -} - -fn run(args: &Args) -> Result<(), git2::Error> { - let state = RefCell::new(State { - progress: None, - total: 0, - current: 0, - path: None, - newline: false, - }); - let mut cb = RemoteCallbacks::new(); - cb.transfer_progress(|stats| { - let mut state = state.borrow_mut(); - state.progress = Some(stats.to_owned()); - print(&mut *state); - true - }); - - let mut co = CheckoutBuilder::new(); - co.progress(|path, cur, total| { - let mut state = state.borrow_mut(); - state.path = path.map(|p| p.to_path_buf()); - state.current = cur; - state.total = total; - print(&mut *state); - }); - - let mut fo = FetchOptions::new(); - fo.remote_callbacks(cb); - try!(RepoBuilder::new().fetch_options(fo).with_checkout(co) - .clone(&args.arg_url, Path::new(&args.arg_path))); - println!(""); - - Ok(()) -} - -fn main() { - const USAGE: &'static str = " -usage: add [options] - -Options: - -h, --help show this message -"; - - let args = Docopt::new(USAGE).and_then(|d| d.decode()) - .unwrap_or_else(|e| e.exit()); - match run(&args) { - Ok(()) => {} - Err(e) => println!("error: {}", e), - } -} - diff -Nru cargo-0.17.0/vendor/git2-0.6.3/examples/diff.rs cargo-0.19.0/vendor/git2-0.6.3/examples/diff.rs --- cargo-0.17.0/vendor/git2-0.6.3/examples/diff.rs 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/git2-0.6.3/examples/diff.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,283 +0,0 @@ -/* - * libgit2 "diff" example - shows how to use the diff API - * - * Written by the libgit2 contributors - * - * To the extent 
possible under law, the author(s) have dedicated all copyright - * and related and neighboring rights to this software to the public domain - * worldwide. This software is distributed without any warranty. - * - * You should have received a copy of the CC0 Public Domain Dedication along - * with this software. If not, see - * . - */ - -#![deny(warnings)] - -extern crate git2; -extern crate docopt; -extern crate rustc_serialize; - -use std::str; - -use docopt::Docopt; -use git2::{Repository, Error, Object, ObjectType, DiffOptions, Diff}; -use git2::{DiffFindOptions, DiffFormat}; - -#[derive(RustcDecodable)] #[allow(non_snake_case)] -struct Args { - arg_from_oid: Option, - arg_to_oid: Option, - flag_patch: bool, - flag_cached: bool, - flag_nocached: bool, - flag_name_only: bool, - flag_name_status: bool, - flag_raw: bool, - flag_format: Option, - flag_color: bool, - flag_no_color: bool, - flag_R: bool, - flag_text: bool, - flag_ignore_space_at_eol: bool, - flag_ignore_space_change: bool, - flag_ignore_all_space: bool, - flag_ignored: bool, - flag_untracked: bool, - flag_patience: bool, - flag_minimal: bool, - flag_stat: bool, - flag_numstat: bool, - flag_shortstat: bool, - flag_summary: bool, - flag_find_renames: Option, - flag_find_copies: Option, - flag_find_copies_harder: bool, - flag_break_rewrites: bool, - flag_unified: Option, - flag_inter_hunk_context: Option, - flag_abbrev: Option, - flag_src_prefix: Option, - flag_dst_prefix: Option, - flag_git_dir: Option, -} - -const RESET: &'static str = "\u{1b}[m"; -const BOLD: &'static str = "\u{1b}[1m"; -const RED: &'static str = "\u{1b}[31m"; -const GREEN: &'static str = "\u{1b}[32m"; -const CYAN: &'static str = "\u{1b}[36m"; - -#[derive(PartialEq, Eq, Copy, Clone)] -enum Cache { Normal, Only, None } - -fn run(args: &Args) -> Result<(), Error> { - let path = args.flag_git_dir.as_ref().map(|s| &s[..]).unwrap_or("."); - let repo = try!(Repository::open(path)); - - // Prepare our diff options based on the arguments given 
- let mut opts = DiffOptions::new(); - opts.reverse(args.flag_R) - .force_text(args.flag_text) - .ignore_whitespace_eol(args.flag_ignore_space_at_eol) - .ignore_whitespace_change(args.flag_ignore_space_change) - .ignore_whitespace(args.flag_ignore_all_space) - .include_ignored(args.flag_ignored) - .include_untracked(args.flag_untracked) - .patience(args.flag_patience) - .minimal(args.flag_minimal); - if let Some(amt) = args.flag_unified { opts.context_lines(amt); } - if let Some(amt) = args.flag_inter_hunk_context { opts.interhunk_lines(amt); } - if let Some(amt) = args.flag_abbrev { opts.id_abbrev(amt); } - if let Some(ref s) = args.flag_src_prefix { opts.old_prefix(&s); } - if let Some(ref s) = args.flag_dst_prefix { opts.new_prefix(&s); } - if let Some("diff-index") = args.flag_format.as_ref().map(|s| &s[..]) { - opts.id_abbrev(40); - } - - // Prepare the diff to inspect - let t1 = try!(tree_to_treeish(&repo, args.arg_from_oid.as_ref())); - let t2 = try!(tree_to_treeish(&repo, args.arg_to_oid.as_ref())); - let head = try!(tree_to_treeish(&repo, Some(&"HEAD".to_string()))).unwrap(); - let mut diff = match (t1, t2, args.cache()) { - (Some(t1), Some(t2), _) => { - try!(repo.diff_tree_to_tree(t1.as_tree(), t2.as_tree(), - Some(&mut opts))) - } - (t1, None, Cache::None) => { - let t1 = t1.unwrap_or(head); - try!(repo.diff_tree_to_workdir(t1.as_tree(), Some(&mut opts))) - } - (t1, None, Cache::Only) => { - let t1 = t1.unwrap_or(head); - try!(repo.diff_tree_to_index(t1.as_tree(), None, Some(&mut opts))) - } - (Some(t1), None, _) => { - try!(repo.diff_tree_to_workdir_with_index(t1.as_tree(), - Some(&mut opts))) - } - (None, None, _) => { - try!(repo.diff_index_to_workdir(None, Some(&mut opts))) - } - (None, Some(_), _) => unreachable!(), - }; - - // Apply rename and copy detection if requested - if args.flag_break_rewrites || args.flag_find_copies_harder || - args.flag_find_renames.is_some() || args.flag_find_copies.is_some() - { - let mut opts = DiffFindOptions::new(); 
- if let Some(t) = args.flag_find_renames { - opts.rename_threshold(t); - opts.renames(true); - } - if let Some(t) = args.flag_find_copies { - opts.copy_threshold(t); - opts.copies(true); - } - opts.copies_from_unmodified(args.flag_find_copies_harder) - .rewrites(args.flag_break_rewrites); - try!(diff.find_similar(Some(&mut opts))); - } - - // Generate simple output - let stats = args.flag_stat | args.flag_numstat | args.flag_shortstat | - args.flag_summary; - if stats { - try!(print_stats(&diff, args)); - } - if args.flag_patch || !stats { - if args.color() { print!("{}", RESET); } - let mut last_color = None; - try!(diff.print(args.diff_format(), |_delta, _hunk, line| { - if args.color() { - let next = match line.origin() { - '+' => Some(GREEN), - '-' => Some(RED), - '>' => Some(GREEN), - '<' => Some(RED), - 'F' => Some(BOLD), - 'H' => Some(CYAN), - _ => None - }; - if args.color() && next != last_color { - if last_color == Some(BOLD) || next == Some(BOLD) { - print!("{}", RESET); - } - print!("{}", next.unwrap_or(RESET)); - last_color = next; - } - } - - match line.origin() { - '+' | '-' | ' ' => print!("{}", line.origin()), - _ => {} - } - print!("{}", str::from_utf8(line.content()).unwrap()); - true - })); - if args.color() { print!("{}", RESET); } - } - - Ok(()) -} - -fn print_stats(diff: &Diff, args: &Args) -> Result<(), Error> { - let stats = try!(diff.stats()); - let mut format = git2::DIFF_STATS_NONE; - if args.flag_stat { - format = format | git2::DIFF_STATS_FULL; - } - if args.flag_shortstat { - format = format | git2::DIFF_STATS_SHORT; - } - if args.flag_numstat { - format = format | git2::DIFF_STATS_NUMBER; - } - if args.flag_summary { - format = format | git2::DIFF_STATS_INCLUDE_SUMMARY; - } - let buf = try!(stats.to_buf(format, 80)); - print!("{}", str::from_utf8(&*buf).unwrap()); - Ok(()) -} - -fn tree_to_treeish<'a>(repo: &'a Repository, arg: Option<&String>) - -> Result>, Error> { - let arg = match arg { Some(s) => s, None => return Ok(None) }; - 
let obj = try!(repo.revparse_single(arg)); - let tree = try!(obj.peel(ObjectType::Tree)); - Ok(Some(tree)) -} - -impl Args { - fn cache(&self) -> Cache { - if self.flag_cached {Cache::Only} - else if self.flag_nocached {Cache::None} - else {Cache::Normal} - } - fn color(&self) -> bool { self.flag_color && !self.flag_no_color } - fn diff_format(&self) -> DiffFormat { - if self.flag_patch {DiffFormat::Patch} - else if self.flag_name_only {DiffFormat::NameOnly} - else if self.flag_name_status {DiffFormat::NameStatus} - else if self.flag_raw {DiffFormat::Raw} - else { - match self.flag_format.as_ref().map(|s| &s[..]) { - Some("name") => DiffFormat::NameOnly, - Some("name-status") => DiffFormat::NameStatus, - Some("raw") => DiffFormat::Raw, - Some("diff-index") => DiffFormat::Raw, - _ => DiffFormat::Patch, - } - } - } -} - -fn main() { - const USAGE: &'static str = " -usage: diff [options] [ []] - -Options: - -p, --patch show output in patch format - --cached use staged changes as diff - --nocached do not use staged changes - --name-only show only names of changed files - --name-status show only names and status changes - --raw generate the raw format - --format= specify format for stat summary - --color use color output - --no-color never use color output - -R swap two inputs - -a, --text treat all files as text - --ignore-space-at-eol ignore changes in whitespace at EOL - -b, --ignore-space-change ignore changes in amount of whitespace - -w, --ignore-all-space ignore whitespace when comparing lines - --ignored show ignored files as well - --untracked show untracked files - --patience generate diff using the patience algorithm - --minimal spend extra time to find smallest diff - --stat generate a diffstat - --numstat similar to --stat, but more machine friendly - --shortstat only output last line of --stat - --summary output condensed summary of header info - -M, --find-renames set threshold for findind renames (default 50) - -C, --find-copies set threshold for finding 
copies (default 50) - --find-copies-harder inspect unmodified files for sources of copies - -B, --break-rewrites break complete rewrite changes into pairs - -U, --unified lints of context to show - --inter-hunk-context maximum lines of change between hunks - --abbrev length to abbreviate commits to - --src-prefix show given source prefix instead of 'a/' - --dst-prefix show given destinction prefix instead of 'b/' - --git-dir path to git repository to use - -h, --help show this message -"; - - let args = Docopt::new(USAGE).and_then(|d| d.decode()) - .unwrap_or_else(|e| e.exit()); - match run(&args) { - Ok(()) => {} - Err(e) => println!("error: {}", e), - } -} diff -Nru cargo-0.17.0/vendor/git2-0.6.3/examples/fetch.rs cargo-0.19.0/vendor/git2-0.6.3/examples/fetch.rs --- cargo-0.17.0/vendor/git2-0.6.3/examples/fetch.rs 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/git2-0.6.3/examples/fetch.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,131 +0,0 @@ -/* - * libgit2 "fetch" example - shows how to fetch remote data - * - * Written by the libgit2 contributors - * - * To the extent possible under law, the author(s) have dedicated all copyright - * and related and neighboring rights to this software to the public domain - * worldwide. This software is distributed without any warranty. - * - * You should have received a copy of the CC0 Public Domain Dedication along - * with this software. If not, see - * . 
- */ - -#![deny(warnings)] - -extern crate git2; -extern crate docopt; -extern crate rustc_serialize; - -use docopt::Docopt; -use git2::{Repository, RemoteCallbacks, Direction, AutotagOption, FetchOptions}; -use std::io::{self, Write}; -use std::str; - -#[derive(RustcDecodable)] -struct Args { - arg_remote: Option, -} - -fn run(args: &Args) -> Result<(), git2::Error> { - let repo = try!(Repository::open(".")); - let remote = args.arg_remote.as_ref().map(|s| &s[..]).unwrap_or("origin"); - - // Figure out whether it's a named remote or a URL - println!("Fetching {} for repo", remote); - let mut cb = RemoteCallbacks::new(); - let mut remote = try!(repo.find_remote(remote).or_else(|_| { - repo.remote_anonymous(remote) - })); - cb.sideband_progress(|data| { - print!("remote: {}", str::from_utf8(data).unwrap()); - io::stdout().flush().unwrap(); - true - }); - - // This callback gets called for each remote-tracking branch that gets - // updated. The message we output depends on whether it's a new one or an - // update. - cb.update_tips(|refname, a, b| { - if a.is_zero() { - println!("[new] {:20} {}", b, refname); - } else { - println!("[updated] {:10}..{:10} {}", a, b, refname); - } - true - }); - - // Here we show processed and total objects in the pack and the amount of - // received data. Most frontends will probably want to show a percentage and - // the download rate. - cb.transfer_progress(|stats| { - if stats.received_objects() == stats.total_objects() { - print!("Resolving deltas {}/{}\r", stats.indexed_deltas(), - stats.total_deltas()); - } else if stats.total_objects() > 0 { - print!("Received {}/{} objects ({}) in {} bytes\r", - stats.received_objects(), - stats.total_objects(), - stats.indexed_objects(), - stats.received_bytes()); - } - io::stdout().flush().unwrap(); - true - }); - - // Connect to the remote end specifying that we want to fetch information - // from it. - try!(remote.connect(Direction::Fetch)); - - // Download the packfile and index it. 
This function updates the amount of - // received data and the indexer stats which lets you inform the user about - // progress. - let mut fo = FetchOptions::new(); - fo.remote_callbacks(cb); - try!(remote.download(&[], Some(&mut fo))); - - { - // If there are local objects (we got a thin pack), then tell the user - // how many objects we saved from having to cross the network. - let stats = remote.stats(); - if stats.local_objects() > 0 { - println!("\rReceived {}/{} objects in {} bytes (used {} local \ - objects)", stats.indexed_objects(), - stats.total_objects(), stats.received_bytes(), - stats.local_objects()); - } else { - println!("\rReceived {}/{} objects in {} bytes", - stats.indexed_objects(), stats.total_objects(), - stats.received_bytes()); - } - } - - // Disconnect the underlying connection to prevent from idling. - remote.disconnect(); - - // Update the references in the remote's namespace to point to the right - // commits. This may be needed even if there was no packfile to download, - // which can happen e.g. when the branches have been changed but all the - // needed objects are available locally. 
- try!(remote.update_tips(None, true, - AutotagOption::Unspecified, None)); - - Ok(()) -} - -fn main() { - const USAGE: &'static str = " -usage: fetch [options] [] - -Options: - -h, --help show this message -"; - - let args = Docopt::new(USAGE).and_then(|d| d.decode()) - .unwrap_or_else(|e| e.exit()); - match run(&args) { - Ok(()) => {} - Err(e) => println!("error: {}", e), - } -} diff -Nru cargo-0.17.0/vendor/git2-0.6.3/examples/init.rs cargo-0.19.0/vendor/git2-0.6.3/examples/init.rs --- cargo-0.17.0/vendor/git2-0.6.3/examples/init.rs 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/git2-0.6.3/examples/init.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,151 +0,0 @@ -/* - * libgit2 "init" example - shows how to initialize a new repo - * - * Written by the libgit2 contributors - * - * To the extent possible under law, the author(s) have dedicated all copyright - * and related and neighboring rights to this software to the public domain - * worldwide. This software is distributed without any warranty. - * - * You should have received a copy of the CC0 Public Domain Dedication along - * with this software. If not, see - * . 
- */ - -#![deny(warnings)] - -extern crate git2; -extern crate docopt; -extern crate rustc_serialize; - -use docopt::Docopt; -use git2::{Repository, RepositoryInitOptions, RepositoryInitMode, Error}; -use std::path::{PathBuf, Path}; - -#[derive(RustcDecodable)] -struct Args { - arg_directory: String, - flag_quiet: bool, - flag_bare: bool, - flag_template: Option, - flag_separate_git_dir: Option, - flag_initial_commit: bool, - flag_shared: Option, -} - -fn run(args: &Args) -> Result<(), Error> { - let mut path = PathBuf::from(&args.arg_directory); - let repo = if !args.flag_bare && args.flag_template.is_none() && - args.flag_shared.is_none() && - args.flag_separate_git_dir.is_none() { - try!(Repository::init(&path)) - } else { - let mut opts = RepositoryInitOptions::new(); - opts.bare(args.flag_bare); - if let Some(ref s) = args.flag_template { - opts.template_path(Path::new(s)); - } - - // If you specified a separate git directory, then initialize - // the repository at that path and use the second path as the - // working directory of the repository (with a git-link file) - if let Some(ref s) = args.flag_separate_git_dir { - opts.workdir_path(&path); - path = PathBuf::from(s); - } - - if let Some(ref s) = args.flag_shared { - opts.mode(try!(parse_shared(&s))); - } - try!(Repository::init_opts(&path, &opts)) - }; - - // Print a message to stdout like "git init" does - if !args.flag_quiet { - if args.flag_bare || args.flag_separate_git_dir.is_some() { - path = repo.path().to_path_buf(); - } else { - path = repo.workdir().unwrap().to_path_buf(); - } - println!("Initialized empty Git repository in {}", path.display()); - } - - if args.flag_initial_commit { - try!(create_initial_commit(&repo)); - println!("Created empty initial commit"); - } - - Ok(()) -} - -/// Unlike regular "git init", this example shows how to create an initial empty -/// commit in the repository. This is the helper function that does that. 
-fn create_initial_commit(repo: &Repository) -> Result<(), Error> { - // First use the config to initialize a commit signature for the user. - let sig = try!(repo.signature()); - - // Now let's create an empty tree for this commit - let tree_id = { - let mut index = try!(repo.index()); - - // Outside of this example, you could call index.add_path() - // here to put actual files into the index. For our purposes, we'll - // leave it empty for now. - - try!(index.write_tree()) - }; - - let tree = try!(repo.find_tree(tree_id)); - - // Ready to create the initial commit. - // - // Normally creating a commit would involve looking up the current HEAD - // commit and making that be the parent of the initial commit, but here this - // is the first commit so there will be no parent. - try!(repo.commit(Some("HEAD"), &sig, &sig, "Initial commit", &tree, &[])); - - Ok(()) -} - -fn parse_shared(shared: &str) -> Result { - match shared { - "false" | "umask" => Ok(git2::REPOSITORY_INIT_SHARED_UMASK), - "true" | "group" => Ok(git2::REPOSITORY_INIT_SHARED_GROUP), - "all" | "world" => Ok(git2::REPOSITORY_INIT_SHARED_ALL), - _ => { - if shared.starts_with("0") { - match u32::from_str_radix(&shared[1..], 8).ok() { - Some(n) => { - return Ok(RepositoryInitMode::from_bits_truncate(n)) - } - None => { - Err(Error::from_str("invalid octal value for --shared")) - } - } - } else { - Err(Error::from_str("unknown value for --shared")) - } - } - } -} - -fn main() { - const USAGE: &'static str = " -usage: init [options] - -Options: - -q, --quiet don't print information to stdout - --bare initialize a new bare repository - --template use as an initialization template - --separate-git-dir use as the .git directory - --initial-commit create an initial empty commit - --shared permissions to create the repository with -"; - - let args = Docopt::new(USAGE).and_then(|d| d.decode()) - .unwrap_or_else(|e| e.exit()); - match run(&args) { - Ok(()) => {} - Err(e) => println!("error: {}", e), - } -} diff 
-Nru cargo-0.17.0/vendor/git2-0.6.3/examples/log.rs cargo-0.19.0/vendor/git2-0.6.3/examples/log.rs --- cargo-0.17.0/vendor/git2-0.6.3/examples/log.rs 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/git2-0.6.3/examples/log.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,262 +0,0 @@ -/* - * libgit2 "log" example - shows how to walk history and get commit info - * - * Written by the libgit2 contributors - * - * To the extent possible under law, the author(s) have dedicated all copyright - * and related and neighboring rights to this software to the public domain - * worldwide. This software is distributed without any warranty. - * - * You should have received a copy of the CC0 Public Domain Dedication along - * with this software. If not, see - * . - */ - -#![deny(warnings)] - -extern crate rustc_serialize; -extern crate docopt; -extern crate git2; -extern crate time; - -use std::str; -use docopt::Docopt; -use git2::{Repository, Signature, Commit, ObjectType, Time, DiffOptions}; -use git2::{Pathspec, Error, DiffFormat}; - -#[derive(RustcDecodable)] -struct Args { - arg_commit: Vec, - arg_spec: Vec, - flag_topo_order: bool, - flag_date_order: bool, - flag_reverse: bool, - flag_author: Option, - flag_committer: Option, - flag_grep: Option, - flag_git_dir: Option, - flag_skip: Option, - flag_max_count: Option, - flag_merges: bool, - flag_no_merges: bool, - flag_no_min_parents: bool, - flag_no_max_parents: bool, - flag_max_parents: Option, - flag_min_parents: Option, - flag_patch: bool, -} - -fn run(args: &Args) -> Result<(), Error> { - let path = args.flag_git_dir.as_ref().map(|s| &s[..]).unwrap_or("."); - let repo = try!(Repository::open(path)); - let mut revwalk = try!(repo.revwalk()); - - // Prepare the revwalk based on CLI parameters - let base = if args.flag_reverse {git2::SORT_REVERSE} else {git2::SORT_NONE}; - revwalk.set_sorting(base | if args.flag_topo_order { - git2::SORT_TOPOLOGICAL - } else if args.flag_date_order { - git2::SORT_TIME - } else { - 
git2::SORT_NONE - }); - for commit in args.arg_commit.iter() { - if commit.starts_with("^") { - let obj = try!(repo.revparse_single(&commit[1..])); - try!(revwalk.hide(obj.id())); - continue - } - let revspec = try!(repo.revparse(&commit)); - if revspec.mode().contains(git2::REVPARSE_SINGLE) { - try!(revwalk.push(revspec.from().unwrap().id())); - } else { - let from = revspec.from().unwrap().id(); - let to = revspec.to().unwrap().id(); - try!(revwalk.push(to)); - if revspec.mode().contains(git2::REVPARSE_MERGE_BASE) { - let base = try!(repo.merge_base(from, to)); - let o = try!(repo.find_object(base, Some(ObjectType::Commit))); - try!(revwalk.push(o.id())); - } - try!(revwalk.hide(from)); - } - } - if args.arg_commit.len() == 0 { - try!(revwalk.push_head()); - } - - // Prepare our diff options and pathspec matcher - let (mut diffopts, mut diffopts2) = (DiffOptions::new(), DiffOptions::new()); - for spec in args.arg_spec.iter() { - diffopts.pathspec(spec); - diffopts2.pathspec(spec); - } - let ps = try!(Pathspec::new(args.arg_spec.iter())); - - // Filter our revwalk based on the CLI parameters - macro_rules! 
filter_try { - ($e:expr) => (match $e { Ok(t) => t, Err(e) => return Some(Err(e)) }) - } - let revwalk = revwalk.filter_map(|id| { - let id = filter_try!(id); - let commit = filter_try!(repo.find_commit(id)); - let parents = commit.parents().len(); - if parents < args.min_parents() { return None } - if let Some(n) = args.max_parents() { - if parents >= n { return None } - } - if args.arg_spec.len() > 0 { - match commit.parents().len() { - 0 => { - let tree = filter_try!(commit.tree()); - let flags = git2::PATHSPEC_NO_MATCH_ERROR; - if ps.match_tree(&tree, flags).is_err() { return None } - } - _ => { - let m = commit.parents().all(|parent| { - match_with_parent(&repo, &commit, &parent, &mut diffopts) - .unwrap_or(false) - }); - if !m { return None } - } - } - } - if !sig_matches(commit.author(), &args.flag_author) { return None } - if !sig_matches(commit.committer(), &args.flag_committer) { return None } - if !log_message_matches(commit.message(), &args.flag_grep) { return None } - Some(Ok(commit)) - }).skip(args.flag_skip.unwrap_or(0)).take(args.flag_max_count.unwrap_or(!0)); - - // print! 
- for commit in revwalk { - let commit = try!(commit); - print_commit(&commit); - if !args.flag_patch || commit.parents().len() > 1 { continue } - let a = if commit.parents().len() == 1 { - let parent = try!(commit.parent(0)); - Some(try!(parent.tree())) - } else { - None - }; - let b = try!(commit.tree()); - let diff = try!(repo.diff_tree_to_tree(a.as_ref(), Some(&b), - Some(&mut diffopts2))); - try!(diff.print(DiffFormat::Patch, |_delta, _hunk, line| { - match line.origin() { - ' ' | '+' | '-' => print!("{}", line.origin()), - _ => {} - } - print!("{}", str::from_utf8(line.content()).unwrap()); - true - })); - } - - Ok(()) -} - -fn sig_matches(sig: Signature, arg: &Option) -> bool { - match *arg { - Some(ref s) => { - sig.name().map(|n| n.contains(s)).unwrap_or(false) || - sig.email().map(|n| n.contains(s)).unwrap_or(false) - } - None => true - } -} - -fn log_message_matches(msg: Option<&str>, grep: &Option) -> bool { - match (grep, msg) { - (&None, _) => true, - (&Some(_), None) => false, - (&Some(ref s), Some(msg)) => msg.contains(s), - } -} - -fn print_commit(commit: &Commit) { - println!("commit {}", commit.id()); - - if commit.parents().len() > 1 { - print!("Merge:"); - for id in commit.parent_ids() { - print!(" {:.8}", id); - } - println!(""); - } - - let author = commit.author(); - println!("Author: {}", author); - print_time(&author.when(), "Date: "); - println!(""); - - for line in String::from_utf8_lossy(commit.message_bytes()).lines() { - println!(" {}", line); - } - println!(""); -} - -fn print_time(time: &Time, prefix: &str) { - let (offset, sign) = match time.offset_minutes() { - n if n < 0 => (-n, '-'), - n => (n, '+'), - }; - let (hours, minutes) = (offset / 60, offset % 60); - let ts = time::Timespec::new(time.seconds() + - (time.offset_minutes() as i64) * 60, 0); - let time = time::at(ts); - - println!("{}{} {}{:02}{:02}", prefix, - time.strftime("%a %b %e %T %Y").unwrap(), sign, hours, minutes); - -} - -fn match_with_parent(repo: &Repository, 
commit: &Commit, parent: &Commit, - opts: &mut DiffOptions) -> Result { - let a = try!(parent.tree()); - let b = try!(commit.tree()); - let diff = try!(repo.diff_tree_to_tree(Some(&a), Some(&b), Some(opts))); - Ok(diff.deltas().len() > 0) -} - -impl Args { - fn min_parents(&self) -> usize { - if self.flag_no_min_parents { return 0 } - self.flag_min_parents.unwrap_or(if self.flag_merges {2} else {0}) - } - - fn max_parents(&self) -> Option { - if self.flag_no_max_parents { return None } - self.flag_max_parents.or(if self.flag_no_merges {Some(1)} else {None}) - } -} - -fn main() { - const USAGE: &'static str = " -usage: log [options] [..] [--] [..] - -Options: - --topo-order sort commits in topological order - --date-order sort commits in date order - --reverse sort commits in reverse - --author author to sort by - --committer committer to sort by - --grep pattern to filter commit messages by - --git-dir alternative git directory to use - --skip number of commits to skip - -n, --max-count maximum number of commits to show - --merges only show merge commits - --no-merges don't show merge commits - --no-min-parents don't require a minimum number of parents - --no-max-parents don't require a maximum number of parents - --max-parents specify a maximum number of parents for a commit - --min-parents specify a minimum number of parents for a commit - -p, --patch show commit diff - -h, --help show this message -"; - - let args = Docopt::new(USAGE).and_then(|d| d.decode()) - .unwrap_or_else(|e| e.exit()); - match run(&args) { - Ok(()) => {} - Err(e) => println!("error: {}", e), - } -} diff -Nru cargo-0.17.0/vendor/git2-0.6.3/examples/ls-remote.rs cargo-0.19.0/vendor/git2-0.6.3/examples/ls-remote.rs --- cargo-0.17.0/vendor/git2-0.6.3/examples/ls-remote.rs 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/git2-0.6.3/examples/ls-remote.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,63 +0,0 @@ -/* - * libgit2 "ls-remote" example - * - * Written by the libgit2 contributors 
- * - * To the extent possible under law, the author(s) have dedicated all copyright - * and related and neighboring rights to this software to the public domain - * worldwide. This software is distributed without any warranty. - * - * You should have received a copy of the CC0 Public Domain Dedication along - * with this software. If not, see - * . - */ - -#![deny(warnings)] - -extern crate git2; -extern crate docopt; -extern crate rustc_serialize; - -use docopt::Docopt; -use git2::{Repository, Direction}; - -#[derive(RustcDecodable)] -struct Args { - arg_remote: String, -} - -fn run(args: &Args) -> Result<(), git2::Error> { - let repo = try!(Repository::open(".")); - let remote = &args.arg_remote; - let mut remote = try!(repo.find_remote(remote).or_else(|_| { - repo.remote_anonymous(remote) - })); - - // Connect to the remote and call the printing function for each of the - // remote references. - try!(remote.connect(Direction::Fetch)); - - // Get the list of references on the remote and print out their name next to - // what they point to. 
- for head in try!(remote.list()).iter() { - println!("{}\t{}", head.oid(), head.name()); - } - - Ok(()) -} - -fn main() { - const USAGE: &'static str = " -usage: ls-remote [option] - -Options: - -h, --help show this message -"; - - let args = Docopt::new(USAGE).and_then(|d| d.decode()) - .unwrap_or_else(|e| e.exit()); - match run(&args) { - Ok(()) => {} - Err(e) => println!("error: {}", e), - } -} diff -Nru cargo-0.17.0/vendor/git2-0.6.3/examples/rev-list.rs cargo-0.19.0/vendor/git2-0.6.3/examples/rev-list.rs --- cargo-0.17.0/vendor/git2-0.6.3/examples/rev-list.rs 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/git2-0.6.3/examples/rev-list.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,96 +0,0 @@ -/* - * libgit2 "rev-list" example - shows how to transform a rev-spec into a list - * of commit ids - * - * Written by the libgit2 contributors - * - * To the extent possible under law, the author(s) have dedicated all copyright - * and related and neighboring rights to this software to the public domain - * worldwide. This software is distributed without any warranty. - * - * You should have received a copy of the CC0 Public Domain Dedication along - * with this software. If not, see - * . 
- */ - -#![deny(warnings)] - -extern crate git2; -extern crate docopt; -extern crate rustc_serialize; - -use docopt::Docopt; -use git2::{Repository, Error, Revwalk, Oid}; - -#[derive(RustcDecodable)] -struct Args { - arg_spec: Vec, - flag_topo_order: bool, - flag_date_order: bool, - flag_reverse: bool, - flag_not: Vec, -} - -fn run(args: &Args) -> Result<(), git2::Error> { - let repo = try!(Repository::open(".")); - let mut revwalk = try!(repo.revwalk()); - - let base = if args.flag_reverse {git2::SORT_REVERSE} else {git2::SORT_NONE}; - revwalk.set_sorting(base | if args.flag_topo_order { - git2::SORT_TOPOLOGICAL - } else if args.flag_date_order { - git2::SORT_TIME - } else { - git2::SORT_NONE - }); - - let specs = args.flag_not.iter().map(|s| (s, true)) - .chain(args.arg_spec.iter().map(|s| (s, false))) - .map(|(spec, hide)| { - if spec.starts_with("^") {(&spec[1..], !hide)} else {(&spec[..], hide)} - }); - for (spec, hide) in specs { - let id = if spec.contains("..") { - let revspec = try!(repo.revparse(spec)); - if revspec.mode().contains(git2::REVPARSE_MERGE_BASE) { - return Err(Error::from_str("merge bases not implemented")) - } - try!(push(&mut revwalk, revspec.from().unwrap().id(), !hide)); - revspec.to().unwrap().id() - } else { - try!(repo.revparse_single(spec)).id() - }; - try!(push(&mut revwalk, id, hide)); - } - - for id in revwalk { - let id = try!(id); - println!("{}", id); - } - Ok(()) -} - -fn push(revwalk: &mut Revwalk, id: Oid, hide: bool) -> Result<(), Error> { - if hide {revwalk.hide(id)} else {revwalk.push(id)} -} - -fn main() { - const USAGE: &'static str = " -usage: rev-list [options] [--] ... 
- -Options: - --topo-order sort commits in topological order - --date-order sort commits in date order - --reverse sort commits in reverse - --not don't show - -h, --help show this message -"; - - let args = Docopt::new(USAGE).and_then(|d| d.decode()) - .unwrap_or_else(|e| e.exit()); - match run(&args) { - Ok(()) => {} - Err(e) => println!("error: {}", e), - } -} - diff -Nru cargo-0.17.0/vendor/git2-0.6.3/examples/rev-parse.rs cargo-0.19.0/vendor/git2-0.6.3/examples/rev-parse.rs --- cargo-0.17.0/vendor/git2-0.6.3/examples/rev-parse.rs 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/git2-0.6.3/examples/rev-parse.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,69 +0,0 @@ -/* - * libgit2 "rev-parse" example - shows how to parse revspecs - * - * Written by the libgit2 contributors - * - * To the extent possible under law, the author(s) have dedicated all copyright - * and related and neighboring rights to this software to the public domain - * worldwide. This software is distributed without any warranty. - * - * You should have received a copy of the CC0 Public Domain Dedication along - * with this software. If not, see - * . 
- */ - -#![deny(warnings)] - -extern crate git2; -extern crate docopt; -extern crate rustc_serialize; - -use docopt::Docopt; -use git2::Repository; - -#[derive(RustcDecodable)] -struct Args { - arg_spec: String, - flag_git_dir: Option, -} - -fn run(args: &Args) -> Result<(), git2::Error> { - let path = args.flag_git_dir.as_ref().map(|s| &s[..]).unwrap_or("."); - let repo = try!(Repository::open(path)); - - let revspec = try!(repo.revparse(&args.arg_spec)); - - if revspec.mode().contains(git2::REVPARSE_SINGLE) { - println!("{}", revspec.from().unwrap().id()); - } else if revspec.mode().contains(git2::REVPARSE_RANGE) { - let to = revspec.to().unwrap(); - let from = revspec.from().unwrap(); - println!("{}", to.id()); - - if revspec.mode().contains(git2::REVPARSE_MERGE_BASE) { - let base = try!(repo.merge_base(from.id(), to.id())); - println!("{}", base); - } - - println!("^{}", from.id()); - } else { - return Err(git2::Error::from_str("invalid results from revparse")) - } - Ok(()) -} - -fn main() { - const USAGE: &'static str = " -usage: rev-parse [options] - -Options: - --git-dir directory for the git repository to check -"; - - let args = Docopt::new(USAGE).and_then(|d| d.decode()) - .unwrap_or_else(|e| e.exit()); - match run(&args) { - Ok(()) => {} - Err(e) => println!("error: {}", e), - } -} diff -Nru cargo-0.17.0/vendor/git2-0.6.3/examples/status.rs cargo-0.19.0/vendor/git2-0.6.3/examples/status.rs --- cargo-0.17.0/vendor/git2-0.6.3/examples/status.rs 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/git2-0.6.3/examples/status.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,373 +0,0 @@ -/* - * libgit2 "status" example - shows how to use the status APIs - * - * Written by the libgit2 contributors - * - * To the extent possible under law, the author(s) have dedicated all copyright - * and related and neighboring rights to this software to the public domain - * worldwide. This software is distributed without any warranty. 
- * - * You should have received a copy of the CC0 Public Domain Dedication along - * with this software. If not, see - * . - */ - -#![deny(warnings)] - -extern crate git2; -extern crate docopt; -extern crate rustc_serialize; - -use std::str; -use std::time::Duration; -use docopt::Docopt; -use git2::{Repository, Error, StatusOptions, ErrorCode, SubmoduleIgnore}; - -#[derive(RustcDecodable)] -struct Args { - arg_spec: Vec, - flag_short: bool, - flag_long: bool, - flag_porcelain: bool, - flag_branch: bool, - flag_z: bool, - flag_ignored: bool, - flag_untracked_files: Option, - flag_ignore_submodules: Option, - flag_git_dir: Option, - flag_repeat: bool, - flag_list_submodules: bool, -} - -#[derive(Eq, PartialEq)] -enum Format { Long, Short, Porcelain } - -fn run(args: &Args) -> Result<(), Error> { - let path = args.flag_git_dir.clone().unwrap_or(".".to_string()); - let repo = try!(Repository::open(&path)); - if repo.is_bare() { - return Err(Error::from_str("cannot report status on bare repository")) - } - - let mut opts = StatusOptions::new(); - opts.include_ignored(args.flag_ignored); - match args.flag_untracked_files.as_ref().map(|s| &s[..]) { - Some("no") => { opts.include_untracked(false); } - Some("normal") => { opts.include_untracked(true); } - Some("all") => { - opts.include_untracked(true).recurse_untracked_dirs(true); - } - Some(_) => return Err(Error::from_str("invalid untracked-files value")), - None => {} - } - match args.flag_ignore_submodules.as_ref().map(|s| &s[..]) { - Some("all") => { opts.exclude_submodules(true); } - Some(_) => return Err(Error::from_str("invalid ignore-submodules value")), - None => {} - } - opts.include_untracked(!args.flag_ignored); - for spec in args.arg_spec.iter() { - opts.pathspec(spec); - } - - loop { - if args.flag_repeat { - println!("\u{1b}[H\u{1b}[2J"); - } - - let statuses = try!(repo.statuses(Some(&mut opts))); - - if args.flag_branch { - try!(show_branch(&repo, args.format())); - } - if args.flag_list_submodules { - 
try!(print_submodules(&repo)); - } - - if args.format() == Format::Long { - print_long(statuses); - } else { - print_short(&repo, statuses); - } - - if args.flag_repeat { - std::thread::sleep(Duration::new(10, 0)); - } else { - return Ok(()) - } - } -} - -fn show_branch(repo: &Repository, format: Format) -> Result<(), Error> { - let head = match repo.head() { - Ok(head) => Some(head), - Err(ref e) if e.code() == ErrorCode::UnbornBranch || - e.code() == ErrorCode::NotFound => None, - Err(e) => return Err(e), - }; - let head = head.as_ref().and_then(|h| h.shorthand()); - - if format == Format::Long { - println!("# On branch {}", - head.unwrap_or("Not currently on any branch")); - } else { - println!("## {}", head.unwrap_or("HEAD (no branch)")); - } - Ok(()) -} - -fn print_submodules(repo: &Repository) -> Result<(), Error> { - let modules = try!(repo.submodules()); - println!("# Submodules"); - for sm in modules.iter() { - println!("# - submodule '{}' at {}", sm.name().unwrap(), - sm.path().display()); - } - Ok(()) -} - -// This function print out an output similar to git's status command in long -// form, including the command-line hints. 
-fn print_long(statuses: git2::Statuses) { - let mut header = false; - let mut rm_in_workdir = false; - let mut changes_in_index = false; - let mut changed_in_workdir = false; - - // Print index changes - for entry in statuses.iter().filter(|e| e.status() != git2::STATUS_CURRENT) { - if entry.status().contains(git2::STATUS_WT_DELETED) { - rm_in_workdir = true; - } - let istatus = match entry.status() { - s if s.contains(git2::STATUS_INDEX_NEW) => "new file: ", - s if s.contains(git2::STATUS_INDEX_MODIFIED) => "modified: ", - s if s.contains(git2::STATUS_INDEX_DELETED) => "deleted: ", - s if s.contains(git2::STATUS_INDEX_RENAMED) => "renamed: ", - s if s.contains(git2::STATUS_INDEX_TYPECHANGE) => "typechange:", - _ => continue, - }; - if !header { - println!("\ -# Changes to be committed: -# (use \"git reset HEAD ...\" to unstage) -#"); - header = true; - } - - let old_path = entry.head_to_index().unwrap().old_file().path(); - let new_path = entry.head_to_index().unwrap().new_file().path(); - match (old_path, new_path) { - (Some(ref old), Some(ref new)) if old != new => { - println!("#\t{} {} -> {}", istatus, old.display(), - new.display()); - } - (old, new) => { - println!("#\t{} {}", istatus, old.or(new).unwrap().display()); - } - } - } - - if header { - changes_in_index = true; - println!("#"); - } - header = false; - - // Print workdir changes to tracked files - for entry in statuses.iter() { - // With `STATUS_OPT_INCLUDE_UNMODIFIED` (not used in this example) - // `index_to_workdir` may not be `None` even if there are no differences, - // in which case it will be a `Delta::Unmodified`. 
- if entry.status() == git2::STATUS_CURRENT || - entry.index_to_workdir().is_none() { - continue - } - - let istatus = match entry.status() { - s if s.contains(git2::STATUS_WT_MODIFIED) => "modified: ", - s if s.contains(git2::STATUS_WT_DELETED) => "deleted: ", - s if s.contains(git2::STATUS_WT_RENAMED) => "renamed: ", - s if s.contains(git2::STATUS_WT_TYPECHANGE) => "typechange:", - _ => continue, - }; - - if !header { - println!("\ -# Changes not staged for commit: -# (use \"git add{} ...\" to update what will be committed) -# (use \"git checkout -- ...\" to discard changes in working directory) -#\ - ", if rm_in_workdir {"/rm"} else {""}); - header = true; - } - - let old_path = entry.index_to_workdir().unwrap().old_file().path(); - let new_path = entry.index_to_workdir().unwrap().new_file().path(); - match (old_path, new_path) { - (Some(ref old), Some(ref new)) if old != new => { - println!("#\t{} {} -> {}", istatus, old.display(), - new.display()); - } - (old, new) => { - println!("#\t{} {}", istatus, old.or(new).unwrap().display()); - } - } - } - - if header { - changed_in_workdir = true; - println!("#"); - } - header = false; - - // Print untracked files - for entry in statuses.iter().filter(|e| e.status() == git2::STATUS_WT_NEW) { - if !header { - println!("\ -# Untracked files -# (use \"git add ...\" to include in what will be committed) -#"); - header = true; - } - let file = entry.index_to_workdir().unwrap().old_file().path().unwrap(); - println!("#\t{}", file.display()); - } - header = false; - - // Print ignored files - for entry in statuses.iter().filter(|e| e.status() == git2::STATUS_IGNORED) { - if !header { - println!("\ -# Ignored files -# (use \"git add -f ...\" to include in what will be committed) -#"); - header = true; - } - let file = entry.index_to_workdir().unwrap().old_file().path().unwrap(); - println!("#\t{}", file.display()); - } - - if !changes_in_index && changed_in_workdir { - println!("no changes added to commit (use \"git add\" 
and/or \ - \"git commit -a\")"); - } -} - -// This version of the output prefixes each path with two status columns and -// shows submodule status information. -fn print_short(repo: &Repository, statuses: git2::Statuses) { - for entry in statuses.iter().filter(|e| e.status() != git2::STATUS_CURRENT) { - let mut istatus = match entry.status() { - s if s.contains(git2::STATUS_INDEX_NEW) => 'A', - s if s.contains(git2::STATUS_INDEX_MODIFIED) => 'M', - s if s.contains(git2::STATUS_INDEX_DELETED) => 'D', - s if s.contains(git2::STATUS_INDEX_RENAMED) => 'R', - s if s.contains(git2::STATUS_INDEX_TYPECHANGE) => 'T', - _ => ' ', - }; - let mut wstatus = match entry.status() { - s if s.contains(git2::STATUS_WT_NEW) => { - if istatus == ' ' { istatus = '?'; } '?' - } - s if s.contains(git2::STATUS_WT_MODIFIED) => 'M', - s if s.contains(git2::STATUS_WT_DELETED) => 'D', - s if s.contains(git2::STATUS_WT_RENAMED) => 'R', - s if s.contains(git2::STATUS_WT_TYPECHANGE) => 'T', - _ => ' ', - }; - - if entry.status().contains(git2::STATUS_IGNORED) { - istatus = '!'; - wstatus = '!'; - } - if istatus == '?' && wstatus == '?' { continue } - let mut extra = ""; - - // A commit in a tree is how submodules are stored, so let's go take a - // look at its status. 
- // - // TODO: check for GIT_FILEMODE_COMMIT - let status = entry.index_to_workdir().and_then(|diff| { - let ignore = SubmoduleIgnore::Unspecified; - diff.new_file().path_bytes() - .and_then(|s| str::from_utf8(s).ok()) - .and_then(|name| repo.submodule_status(name, ignore).ok()) - }); - if let Some(status) = status { - if status.contains(git2::SUBMODULE_STATUS_WD_MODIFIED) { - extra = " (new commits)"; - } else if status.contains(git2::SUBMODULE_STATUS_WD_INDEX_MODIFIED) { - extra = " (modified content)"; - } else if status.contains(git2::SUBMODULE_STATUS_WD_WD_MODIFIED) { - extra = " (modified content)"; - } else if status.contains(git2::SUBMODULE_STATUS_WD_UNTRACKED) { - extra = " (untracked content)"; - } - } - - let (mut a, mut b, mut c) = (None, None, None); - if let Some(diff) = entry.head_to_index() { - a = diff.old_file().path(); - b = diff.new_file().path(); - } - if let Some(diff) = entry.index_to_workdir() { - a = a.or(diff.old_file().path()); - b = b.or(diff.old_file().path()); - c = diff.new_file().path(); - } - - match (istatus, wstatus) { - ('R', 'R') => println!("RR {} {} {}{}", a.unwrap().display(), - b.unwrap().display(), c.unwrap().display(), - extra), - ('R', w) => println!("R{} {} {}{}", w, a.unwrap().display(), - b.unwrap().display(), extra), - (i, 'R') => println!("{}R {} {}{}", i, a.unwrap().display(), - c.unwrap().display(), extra), - (i, w) => println!("{}{} {}{}", i, w, a.unwrap().display(), extra), - } - } - - for entry in statuses.iter().filter(|e| e.status() == git2::STATUS_WT_NEW) { - println!("?? {}", entry.index_to_workdir().unwrap().old_file() - .path().unwrap().display()); - } -} - -impl Args { - fn format(&self) -> Format { - if self.flag_short { Format::Short } - else if self.flag_long { Format::Long } - else if self.flag_porcelain { Format::Porcelain } - else if self.flag_z { Format::Porcelain } - else { Format::Long } - } -} - -fn main() { - const USAGE: &'static str = " -usage: status [options] [--] [..] 
- -Options: - -s, --short show short statuses - --long show longer statuses (default) - --porcelain ?? - -b, --branch show branch information - -z ?? - --ignored show ignored files as well - --untracked-files setting for showing untracked files [no|normal|all] - --ignore-submodules setting for ignoring submodules [all] - --git-dir git directory to analyze - --repeat repeatedly show status, sleeping inbetween - --list-submodules show submodules - -h, --help show this message -"; - - let args = Docopt::new(USAGE).and_then(|d| d.decode()) - .unwrap_or_else(|e| e.exit()); - match run(&args) { - Ok(()) => {} - Err(e) => println!("error: {}", e), - } -} diff -Nru cargo-0.17.0/vendor/git2-0.6.3/examples/tag.rs cargo-0.19.0/vendor/git2-0.6.3/examples/tag.rs --- cargo-0.17.0/vendor/git2-0.6.3/examples/tag.rs 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/git2-0.6.3/examples/tag.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,133 +0,0 @@ -/* - * libgit2 "tag" example - shows how to list, create and delete tags - * - * Written by the libgit2 contributors - * - * To the extent possible under law, the author(s) have dedicated all copyright - * and related and neighboring rights to this software to the public domain - * worldwide. This software is distributed without any warranty. - * - * You should have received a copy of the CC0 Public Domain Dedication along - * with this software. If not, see - * . 
- */ - -#![deny(warnings)] - -extern crate git2; -extern crate docopt; -extern crate rustc_serialize; - -use std::str; -use docopt::Docopt; -use git2::{Repository, Error, Tag, Commit}; - -#[derive(RustcDecodable)] -struct Args { - arg_tagname: Option, - arg_object: Option, - arg_pattern: Option, - flag_n: Option, - flag_force: bool, - flag_list: bool, - flag_delete: Option, - flag_message: Option, -} - -fn run(args: &Args) -> Result<(), Error> { - let repo = try!(Repository::open(".")); - - if let Some(ref name) = args.arg_tagname { - let target = args.arg_object.as_ref().map(|s| &s[..]).unwrap_or("HEAD"); - let obj = try!(repo.revparse_single(target)); - - if let Some(ref message) = args.flag_message { - let sig = try!(repo.signature()); - try!(repo.tag(&name, &obj, &sig, &message, args.flag_force)); - } else { - try!(repo.tag_lightweight(&name, &obj, args.flag_force)); - } - - } else if let Some(ref name) = args.flag_delete { - let obj = try!(repo.revparse_single(name)); - let id = try!(obj.short_id()); - try!(repo.tag_delete(name)); - println!("Deleted tag '{}' (was {})", name, - str::from_utf8(&*id).unwrap()); - - } else if args.flag_list { - let pattern = args.arg_pattern.as_ref().map(|s| &s[..]).unwrap_or("*"); - for name in try!(repo.tag_names(Some(pattern))).iter() { - let name = name.unwrap(); - let obj = try!(repo.revparse_single(name)); - - if let Some(tag) = obj.as_tag() { - print_tag(tag, args); - } else if let Some(commit) = obj.as_commit() { - print_commit(commit, name, args); - } else { - print_name(name); - } - } - } - Ok(()) -} - -fn print_tag(tag: &Tag, args: &Args) { - print!("{:<16}", tag.name().unwrap()); - if args.flag_n.is_some() { - print_list_lines(tag.message(), args); - } else { - println!(""); - } -} - -fn print_commit(commit: &Commit, name: &str, args: &Args) { - print!("{:<16}", name); - if args.flag_n.is_some() { - print_list_lines(commit.message(), args); - } else { - println!(""); - } -} - -fn print_name(name: &str) { - 
println!("{}", name); -} - -fn print_list_lines(message: Option<&str>, args: &Args) { - let message = match message { Some(s) => s, None => return }; - let mut lines = message.lines().filter(|l| !l.trim().is_empty()); - if let Some(first) = lines.next() { - print!("{}", first); - } - println!(""); - - for line in lines.take(args.flag_n.unwrap_or(0) as usize) { - print!(" {}", line); - } -} - -fn main() { - const USAGE: &'static str = " -usage: - tag [-a] [-f] [-m ] [] - tag -d - tag [-n ] -l [] - -Options: - -n specify number of lines from teh annotation to print - -f, --force replace an existing tag with the given name - -l, --list list tags with names matching the pattern given - -d, --delete delete the tag specified - -m, --message message for a new tag - -h, --help show this message -"; - - let args = Docopt::new(USAGE).and_then(|d| d.decode()) - .unwrap_or_else(|e| e.exit()); - match run(&args) { - Ok(()) => {} - Err(e) => println!("error: {}", e), - } -} diff -Nru cargo-0.17.0/vendor/git2-0.6.3/.gitignore cargo-0.19.0/vendor/git2-0.6.3/.gitignore --- cargo-0.17.0/vendor/git2-0.6.3/.gitignore 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/git2-0.6.3/.gitignore 1970-01-01 00:00:00.000000000 +0000 @@ -1,3 +0,0 @@ -target -Cargo.lock -src/main.rs diff -Nru cargo-0.17.0/vendor/git2-0.6.3/.gitmodules cargo-0.19.0/vendor/git2-0.6.3/.gitmodules --- cargo-0.17.0/vendor/git2-0.6.3/.gitmodules 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/git2-0.6.3/.gitmodules 1970-01-01 00:00:00.000000000 +0000 @@ -1,3 +0,0 @@ -[submodule "libgit2-sys/libgit2"] - path = libgit2-sys/libgit2 - url = https://github.com/libgit2/libgit2 diff -Nru cargo-0.17.0/vendor/git2-0.6.3/LICENSE-APACHE cargo-0.19.0/vendor/git2-0.6.3/LICENSE-APACHE --- cargo-0.17.0/vendor/git2-0.6.3/LICENSE-APACHE 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/git2-0.6.3/LICENSE-APACHE 1970-01-01 00:00:00.000000000 +0000 @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 
2004 - http://www.apache.org/licenses/ - -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - -1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. 
For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - -2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - -3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - -4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the 
following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - -5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - -6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - -7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - -8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - -9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- -END OF TERMS AND CONDITIONS - -APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - -Copyright [yyyy] [name of copyright owner] - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
diff -Nru cargo-0.17.0/vendor/git2-0.6.3/LICENSE-MIT cargo-0.19.0/vendor/git2-0.6.3/LICENSE-MIT --- cargo-0.17.0/vendor/git2-0.6.3/LICENSE-MIT 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/git2-0.6.3/LICENSE-MIT 1970-01-01 00:00:00.000000000 +0000 @@ -1,25 +0,0 @@ -Copyright (c) 2014 Alex Crichton - -Permission is hereby granted, free of charge, to any -person obtaining a copy of this software and associated -documentation files (the "Software"), to deal in the -Software without restriction, including without -limitation the rights to use, copy, modify, merge, -publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software -is furnished to do so, subject to the following -conditions: - -The above copyright notice and this permission notice -shall be included in all copies or substantial portions -of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -DEALINGS IN THE SOFTWARE. 
diff -Nru cargo-0.17.0/vendor/git2-0.6.3/README.md cargo-0.19.0/vendor/git2-0.6.3/README.md --- cargo-0.17.0/vendor/git2-0.6.3/README.md 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/git2-0.6.3/README.md 1970-01-01 00:00:00.000000000 +0000 @@ -1,52 +0,0 @@ -# git2-rs - -[![Build Status](https://travis-ci.org/alexcrichton/git2-rs.svg?branch=master)](https://travis-ci.org/alexcrichton/git2-rs) -[![Build Status](https://ci.appveyor.com/api/projects/status/6vem3xgno2kuxnfm?svg=true)](https://ci.appveyor.com/project/alexcrichton/git2-rs) - -[Documentation](http://alexcrichton.com/git2-rs/git2/index.html) - -libgit2 bindings for Rust - -```toml -[dependencies] -git2 = "0.6" -``` - -## Building git2-rs - -First, you'll need to install _CMake_. Afterwards, just run: - -```sh -$ git clone https://github.com/alexcrichton/git2-rs -$ cd git2-rs -$ cargo build -``` - -## Building on OSX 10.10+ - -Currently libssh2 requires linking against OpenSSL, and to compile libssh2 it -also needs to find the OpenSSL headers. On OSX 10.10+ the OpenSSL headers have -been removed, but if you're using Homebrew you can install them via: - -```sh -brew install openssl -``` - -To get this library to pick them up the [standard `rust-openssl` -instructions][instr] can be used to transitively inform libssh2-sys about where -the header files are: - -[instr]: https://github.com/sfackler/rust-openssl#osx - -```sh -export OPENSSL_INCLUDE_DIR=`brew --prefix openssl`/include -export OPENSSL_LIB_DIR=`brew --prefix openssl`/lib -``` - -# License - -`git2-rs` is primarily distributed under the terms of both the MIT license and -the Apache License (Version 2.0), with portions covered by various BSD-like -licenses. - -See LICENSE-APACHE, and LICENSE-MIT for details. 
diff -Nru cargo-0.17.0/vendor/git2-0.6.3/src/blame.rs cargo-0.19.0/vendor/git2-0.6.3/src/blame.rs --- cargo-0.17.0/vendor/git2-0.6.3/src/blame.rs 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/git2-0.6.3/src/blame.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,303 +0,0 @@ -use std::marker; -use {raw, Repository, Oid, signature, Signature}; -use util::{self, Binding}; -use std::path::Path; -use std::ops::Range; -use std::mem; - -/// Opaque structure to hold blame results. -pub struct Blame<'repo> { - raw: *mut raw::git_blame, - _marker: marker::PhantomData<&'repo Repository>, -} - -/// Structure that represents a blame hunk. -pub struct BlameHunk<'blame> { - raw: *mut raw::git_blame_hunk, - _marker: marker::PhantomData<&'blame raw::git_blame>, -} - -/// Blame options -pub struct BlameOptions { - raw: raw::git_blame_options, -} - -/// An iterator over the hunks in a blame. -pub struct BlameIter<'blame> { - range: Range, - blame: &'blame Blame<'blame>, -} - -impl<'repo> Blame<'repo> { - - /// Gets the number of hunks that exist in the blame structure. - pub fn len(&self) -> usize { - unsafe { raw::git_blame_get_hunk_count(self.raw) as usize } - } - - /// Gets the blame hunk at the given index. - pub fn get_index(&self, index: usize) -> Option { - unsafe { - let ptr = raw::git_blame_get_hunk_byindex(self.raw(), index as u32); - if ptr.is_null() { - None - } else { - Some(BlameHunk::from_raw_const(ptr)) - } - } - } - - /// Gets the hunk that relates to the given line number in the newest - /// commit. - pub fn get_line(&self, lineno: usize) -> Option { - unsafe { - let ptr = raw::git_blame_get_hunk_byline(self.raw(), lineno); - if ptr.is_null() { - None - } else { - Some(BlameHunk::from_raw_const(ptr)) - } - } - } - - /// Returns an iterator over the hunks in this blame. 
- pub fn iter(&self) -> BlameIter { - BlameIter { range: 0..self.len(), blame: self } - } - -} - -impl<'blame> BlameHunk<'blame> { - - unsafe fn from_raw_const(raw: *const raw::git_blame_hunk) - -> BlameHunk<'blame> { - BlameHunk { - raw: raw as *mut raw::git_blame_hunk, - _marker: marker::PhantomData, - } - } - - /// Returns OID of the commit where this line was last changed - pub fn final_commit_id(&self) -> Oid { - unsafe { Oid::from_raw(&(*self.raw).final_commit_id) } - } - - /// Returns signature of the commit. - pub fn final_signature(&self) -> Signature { - unsafe { signature::from_raw_const(self, (*self.raw).final_signature) } - } - - /// Returns line number where this hunk begins. - /// - /// Note that the start line is counting from 1. - pub fn final_start_line(&self) -> usize { - unsafe { (*self.raw).final_start_line_number } - } - - /// Returns the OID of the commit where this hunk was found. - /// - /// This will usually be the same as `final_commit_id`, - /// except when `BlameOptions::track_copies_any_commit_copies` has been - /// turned on - pub fn orig_commit_id(&self) -> Oid { - unsafe { Oid::from_raw(&(*self.raw).orig_commit_id) } - } - - /// Returns signature of the commit. - pub fn orig_signature(&self) -> Signature { - unsafe { signature::from_raw_const(self, (*self.raw).orig_signature) } - } - - /// Returns line number where this hunk begins. - /// - /// Note that the start line is counting from 1. - pub fn orig_start_line(&self) -> usize { - unsafe { (*self.raw).orig_start_line_number} - } - - /// Returns path to the file where this hunk originated. - /// - /// Note: `None` could be returned for non-unicode paths on Widnows. 
- pub fn path(&self) -> Option<&Path> { - unsafe { - if let Some(bytes) = ::opt_bytes(self, (*self.raw).orig_path) { - Some(util::bytes2path(bytes)) - } else { - None - } - } - } - - /// Tests whether this hunk has been tracked to a boundary commit - /// (the root, or the commit specified in git_blame_options.oldest_commit). - pub fn is_boundary(&self) -> bool { - unsafe { (*self.raw).boundary == 1 } - } - - /// Returns number of lines in this hunk. - pub fn lines_in_hunk(&self) -> usize { - unsafe { (*self.raw).lines_in_hunk as usize } - } -} - -impl BlameOptions { - - /// Initialize options - pub fn new() -> BlameOptions { - unsafe { - let mut raw: raw::git_blame_options = mem::zeroed(); - assert_eq!( - raw::git_blame_init_options(&mut raw, - raw::GIT_BLAME_OPTIONS_VERSION) - , 0); - - Binding::from_raw(&raw as *const _ as *mut _) - } - } - - fn flag(&mut self, opt: u32, val: bool) -> &mut BlameOptions { - if val { - self.raw.flags |= opt; - } else { - self.raw.flags &= !opt; - } - self - } - - /// Track lines that have moved within a file. - pub fn track_copies_same_file(&mut self, opt: bool) -> &mut BlameOptions { - self.flag(raw::GIT_BLAME_TRACK_COPIES_SAME_FILE, opt) - } - - /// Track lines that have moved across files in the same commit. - pub fn track_copies_same_commit_moves(&mut self, opt: bool) -> &mut BlameOptions { - self.flag(raw::GIT_BLAME_TRACK_COPIES_SAME_COMMIT_MOVES, opt) - } - - /// Track lines that have been copied from another file that exists - /// in the same commit. - pub fn track_copies_same_commit_copies(&mut self, opt: bool) -> &mut BlameOptions { - self.flag(raw::GIT_BLAME_TRACK_COPIES_SAME_COMMIT_COPIES, opt) - } - - /// Track lines that have been copied from another file that exists - /// in any commit. 
- pub fn track_copies_any_commit_copies(&mut self, opt: bool) -> &mut BlameOptions { - self.flag(raw::GIT_BLAME_TRACK_COPIES_ANY_COMMIT_COPIES, opt) - } - - /// Restrict the search of commits to those reachable following only - /// the first parents. - pub fn first_parent(&mut self, opt: bool) -> &mut BlameOptions { - self.flag(raw::GIT_BLAME_FIRST_PARENT, opt) - } - - /// Setter for the id of the newest commit to consider. - pub fn newest_commit(&mut self, id: Oid) -> &mut BlameOptions { - unsafe { self.raw.newest_commit = *id.raw(); } - self - } - - /// Setter for the id of the oldest commit to consider. - pub fn oldest_commit(&mut self, id: Oid) -> &mut BlameOptions { - unsafe { self.raw.oldest_commit = *id.raw(); } - self - } - -} - -impl<'repo> Binding for Blame<'repo> { - type Raw = *mut raw::git_blame; - - unsafe fn from_raw(raw: *mut raw::git_blame) -> Blame<'repo> { - Blame { raw: raw, _marker: marker::PhantomData } - } - - fn raw(&self) -> *mut raw::git_blame { self.raw } -} - -impl<'repo> Drop for Blame<'repo> { - fn drop(&mut self) { - unsafe { raw::git_blame_free(self.raw) } - } -} - -impl<'blame> Binding for BlameHunk<'blame> { - type Raw = *mut raw::git_blame_hunk; - - unsafe fn from_raw(raw: *mut raw::git_blame_hunk) -> BlameHunk<'blame> { - BlameHunk { raw: raw, _marker: marker::PhantomData } - } - - fn raw(&self) -> *mut raw::git_blame_hunk { self.raw } -} - -impl Binding for BlameOptions { - type Raw = *mut raw::git_blame_options; - - unsafe fn from_raw(opts: *mut raw::git_blame_options) -> BlameOptions { - BlameOptions { raw: *opts } - } - - fn raw(&self) -> *mut raw::git_blame_options { - &self.raw as *const _ as *mut _ - } -} - -impl<'blame> Iterator for BlameIter<'blame> { - type Item = BlameHunk<'blame>; - fn next(&mut self) -> Option> { - self.range.next().and_then(|i| self.blame.get_index(i)) - } - - fn size_hint(&self) -> (usize, Option) { self.range.size_hint() } -} - -impl<'blame> DoubleEndedIterator for BlameIter<'blame> { - fn 
next_back(&mut self) -> Option> { - self.range.next_back().and_then(|i| self.blame.get_index(i)) - } -} - -impl<'blame> ExactSizeIterator for BlameIter<'blame> {} - -#[cfg(test)] -mod tests { - use std::fs::{self, File}; - use std::path::Path; - - #[test] - fn smoke() { - let (_td, repo) = ::test::repo_init(); - let mut index = repo.index().unwrap(); - - let root = repo.path().parent().unwrap(); - fs::create_dir(&root.join("foo")).unwrap(); - File::create(&root.join("foo/bar")).unwrap(); - index.add_path(Path::new("foo/bar")).unwrap(); - - let id = index.write_tree().unwrap(); - let tree = repo.find_tree(id).unwrap(); - let sig = repo.signature().unwrap(); - let id = repo.refname_to_id("HEAD").unwrap(); - let parent = repo.find_commit(id).unwrap(); - let commit = repo.commit(Some("HEAD"), &sig, &sig, "commit", - &tree, &[&parent]).unwrap(); - - let blame = repo.blame_file(Path::new("foo/bar"), None).unwrap(); - - assert_eq!(blame.len(), 1); - assert_eq!(blame.iter().count(), 1); - - let hunk = blame.get_index(0).unwrap(); - assert_eq!(hunk.final_commit_id(), commit); - assert_eq!(hunk.final_signature().name(), sig.name()); - assert_eq!(hunk.final_signature().email(), sig.email()); - assert_eq!(hunk.final_start_line(), 1); - assert_eq!(hunk.path(), Some(Path::new("foo/bar"))); - assert_eq!(hunk.lines_in_hunk(), 0); - assert!(!hunk.is_boundary()) - } - -} - diff -Nru cargo-0.17.0/vendor/git2-0.6.3/src/blob.rs cargo-0.19.0/vendor/git2-0.6.3/src/blob.rs --- cargo-0.17.0/vendor/git2-0.6.3/src/blob.rs 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/git2-0.6.3/src/blob.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,104 +0,0 @@ -use std::marker; -use std::mem; -use std::slice; - -use {raw, Oid, Object}; -use util::Binding; - -/// A structure to represent a git [blob][1] -/// -/// [1]: http://git-scm.com/book/en/Git-Internals-Git-Objects -pub struct Blob<'repo> { - raw: *mut raw::git_blob, - _marker: marker::PhantomData>, -} - -impl<'repo> Blob<'repo> { - /// Get 
the id (SHA1) of a repository blob - pub fn id(&self) -> Oid { - unsafe { Binding::from_raw(raw::git_blob_id(&*self.raw)) } - } - - /// Determine if the blob content is most certainly binary or not. - pub fn is_binary(&self) -> bool { - unsafe { raw::git_blob_is_binary(&*self.raw) == 1 } - } - - /// Get the content of this blob. - pub fn content(&self) -> &[u8] { - unsafe { - let data = raw::git_blob_rawcontent(&*self.raw) as *const u8; - let len = raw::git_blob_rawsize(&*self.raw) as usize; - slice::from_raw_parts(data, len) - } - } - - /// Casts this Blob to be usable as an `Object` - pub fn as_object(&self) -> &Object<'repo> { - unsafe { - &*(self as *const _ as *const Object<'repo>) - } - } - - /// Consumes Blob to be returned as an `Object` - pub fn into_object(self) -> Object<'repo> { - assert_eq!(mem::size_of_val(&self), mem::size_of::()); - unsafe { - mem::transmute(self) - } - } -} - -impl<'repo> Binding for Blob<'repo> { - type Raw = *mut raw::git_blob; - - unsafe fn from_raw(raw: *mut raw::git_blob) -> Blob<'repo> { - Blob { - raw: raw, - _marker: marker::PhantomData, - } - } - fn raw(&self) -> *mut raw::git_blob { self.raw } -} - - -impl<'repo> Drop for Blob<'repo> { - fn drop(&mut self) { - unsafe { raw::git_blob_free(self.raw) } - } -} - -#[cfg(test)] -mod tests { - use std::io::prelude::*; - use std::fs::File; - use tempdir::TempDir; - use Repository; - - #[test] - fn buffer() { - let td = TempDir::new("test").unwrap(); - let repo = Repository::init(td.path()).unwrap(); - let id = repo.blob(&[5, 4, 6]).unwrap(); - let blob = repo.find_blob(id).unwrap(); - - assert_eq!(blob.id(), id); - assert_eq!(blob.content(), [5, 4, 6]); - assert!(blob.is_binary()); - - repo.find_object(id, None).unwrap().as_blob().unwrap(); - repo.find_object(id, None).unwrap().into_blob().ok().unwrap(); - } - - #[test] - fn path() { - let td = TempDir::new("test").unwrap(); - let path = td.path().join("foo"); - File::create(&path).unwrap().write_all(&[7, 8, 9]).unwrap(); - let 
repo = Repository::init(td.path()).unwrap(); - let id = repo.blob_path(&path).unwrap(); - let blob = repo.find_blob(id).unwrap(); - assert_eq!(blob.content(), [7, 8, 9]); - blob.into_object(); - } -} diff -Nru cargo-0.17.0/vendor/git2-0.6.3/src/branch.rs cargo-0.19.0/vendor/git2-0.6.3/src/branch.rs --- cargo-0.17.0/vendor/git2-0.6.3/src/branch.rs 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/git2-0.6.3/src/branch.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,162 +0,0 @@ -use std::ffi::CString; -use std::marker; -use std::str; -use libc; - -use {raw, Error, Reference, BranchType, References}; -use util::Binding; - -/// A structure to represent a git [branch][1] -/// -/// A branch is currently just a wrapper to an underlying `Reference`. The -/// reference can be accessed through the `get` and `unwrap` methods. -/// -/// [1]: http://git-scm.com/book/en/Git-Branching-What-a-Branch-Is -pub struct Branch<'repo> { - inner: Reference<'repo>, -} - -/// An iterator over the branches inside of a repository. -pub struct Branches<'repo> { - raw: *mut raw::git_branch_iterator, - _marker: marker::PhantomData>, -} - -impl<'repo> Branch<'repo> { - /// Creates a new branch from a reference - pub fn wrap(reference: Reference) -> Branch { Branch { inner: reference } } - - /// Gain access to the reference that is this branch - pub fn get(&self) -> &Reference<'repo> { &self.inner } - - /// Take ownership of the underlying reference. - pub fn into_reference(self) -> Reference<'repo> { self.inner } - - /// Delete an existing branch reference. - pub fn delete(&mut self) -> Result<(), Error> { - unsafe { try_call!(raw::git_branch_delete(self.get().raw())); } - Ok(()) - } - - /// Determine if the current local branch is pointed at by HEAD. - pub fn is_head(&self) -> bool { - unsafe { raw::git_branch_is_head(&*self.get().raw()) == 1 } - } - - /// Move/rename an existing local branch reference. 
- pub fn rename(&mut self, new_branch_name: &str, force: bool) - -> Result, Error> { - let mut ret = 0 as *mut raw::git_reference; - let new_branch_name = try!(CString::new(new_branch_name)); - unsafe { - try_call!(raw::git_branch_move(&mut ret, self.get().raw(), - new_branch_name, force)); - Ok(Branch::wrap(Binding::from_raw(ret))) - } - } - - /// Return the name of the given local or remote branch. - /// - /// May return `Ok(None)` if the name is not valid utf-8. - pub fn name(&self) -> Result, Error> { - self.name_bytes().map(|s| str::from_utf8(s).ok()) - } - - /// Return the name of the given local or remote branch. - pub fn name_bytes(&self) -> Result<&[u8], Error> { - let mut ret = 0 as *const libc::c_char; - unsafe { - try_call!(raw::git_branch_name(&mut ret, &*self.get().raw())); - Ok(::opt_bytes(self, ret).unwrap()) - } - } - - /// Return the reference supporting the remote tracking branch, given a - /// local branch reference. - pub fn upstream<'a>(&'a self) -> Result, Error> { - let mut ret = 0 as *mut raw::git_reference; - unsafe { - try_call!(raw::git_branch_upstream(&mut ret, &*self.get().raw())); - Ok(Branch::wrap(Binding::from_raw(ret))) - } - } - - /// Set the upstream configuration for a given local branch. - /// - /// If `None` is specified, then the upstream branch is unset. The name - /// provided is the name of the branch to set as upstream. - pub fn set_upstream(&mut self, - upstream_name: Option<&str>) -> Result<(), Error> { - let upstream_name = try!(::opt_cstr(upstream_name)); - unsafe { - try_call!(raw::git_branch_set_upstream(self.get().raw(), - upstream_name)); - Ok(()) - } - } -} - -impl<'repo> Branches<'repo> { - /// Creates a new iterator from the raw pointer given. - /// - /// This function is unsafe as it is not guaranteed that `raw` is a valid - /// pointer. 
- pub unsafe fn from_raw(raw: *mut raw::git_branch_iterator) - -> Branches<'repo> { - Branches { - raw: raw, - _marker: marker::PhantomData, - } - } -} - -impl<'repo> Iterator for Branches<'repo> { - type Item = Result<(Branch<'repo>, BranchType), Error>; - fn next(&mut self) -> Option, BranchType), Error>> { - let mut ret = 0 as *mut raw::git_reference; - let mut typ = raw::GIT_BRANCH_LOCAL; - unsafe { - try_call_iter!(raw::git_branch_next(&mut ret, &mut typ, self.raw)); - let typ = match typ { - raw::GIT_BRANCH_LOCAL => BranchType::Local, - raw::GIT_BRANCH_REMOTE => BranchType::Remote, - n => panic!("unexected branch type: {}", n), - }; - Some(Ok((Branch::wrap(Binding::from_raw(ret)), typ))) - } - } -} - -impl<'repo> Drop for Branches<'repo> { - fn drop(&mut self) { - unsafe { raw::git_branch_iterator_free(self.raw) } - } -} - -#[cfg(test)] -mod tests { - use BranchType; - - #[test] - fn smoke() { - let (_td, repo) = ::test::repo_init(); - let head = repo.head().unwrap(); - let target = head.target().unwrap(); - let commit = repo.find_commit(target).unwrap(); - - let mut b1 = repo.branch("foo", &commit, false).unwrap(); - assert!(!b1.is_head()); - repo.branch("foo2", &commit, false).unwrap(); - - assert_eq!(repo.branches(None).unwrap().count(), 3); - repo.find_branch("foo", BranchType::Local).unwrap(); - let mut b1 = b1.rename("bar", false).unwrap(); - assert_eq!(b1.name().unwrap(), Some("bar")); - assert!(b1.upstream().is_err()); - b1.set_upstream(Some("master")).unwrap(); - b1.upstream().unwrap(); - b1.set_upstream(None).unwrap(); - - b1.delete().unwrap(); - } -} diff -Nru cargo-0.17.0/vendor/git2-0.6.3/src/buf.rs cargo-0.19.0/vendor/git2-0.6.3/src/buf.rs --- cargo-0.17.0/vendor/git2-0.6.3/src/buf.rs 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/git2-0.6.3/src/buf.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,67 +0,0 @@ -use std::slice; -use std::str; -use std::ops::{Deref, DerefMut}; -use libc; - -use raw; -use util::Binding; - -/// A structure to 
wrap an intermediate buffer used by libgit2. -/// -/// A buffer can be thought of a `Vec`, but the `Vec` type is not used to -/// avoid copying data back and forth. -pub struct Buf { - raw: raw::git_buf, -} - -impl Buf { - /// Creates a new empty buffer. - pub fn new() -> Buf { - ::init(); - unsafe { - Binding::from_raw(&mut raw::git_buf { - ptr: 0 as *mut libc::c_char, - size: 0, - asize: 0, - } as *mut _) - } - } - - /// Attempt to view this buffer as a string slice. - /// - /// Returns `None` if the buffer is not valid utf-8. - pub fn as_str(&self) -> Option<&str> { str::from_utf8(&**self).ok() } -} - -impl Deref for Buf { - type Target = [u8]; - fn deref(&self) -> &[u8] { - unsafe { - slice::from_raw_parts(self.raw.ptr as *const u8, - self.raw.size as usize) - } - } -} - -impl DerefMut for Buf { - fn deref_mut(&mut self) -> &mut [u8] { - unsafe { - slice::from_raw_parts_mut(self.raw.ptr as *mut u8, - self.raw.size as usize) - } - } -} - -impl Binding for Buf { - type Raw = *mut raw::git_buf; - unsafe fn from_raw(raw: *mut raw::git_buf) -> Buf { - Buf { raw: *raw } - } - fn raw(&self) -> *mut raw::git_buf { &self.raw as *const _ as *mut _ } -} - -impl Drop for Buf { - fn drop(&mut self) { - unsafe { raw::git_buf_free(&mut self.raw) } - } -} diff -Nru cargo-0.17.0/vendor/git2-0.6.3/src/build.rs cargo-0.19.0/vendor/git2-0.6.3/src/build.rs --- cargo-0.17.0/vendor/git2-0.6.3/src/build.rs 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/git2-0.6.3/src/build.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,548 +0,0 @@ -//! Builder-pattern objects for configuration various git operations. - -use std::ffi::{CStr, CString}; -use std::mem; -use std::path::Path; -use libc::{c_char, size_t, c_void, c_uint, c_int}; - -use {raw, panic, Error, Repository, FetchOptions, IntoCString}; -use {CheckoutNotificationType, DiffFile}; -use util::{self, Binding}; - -/// A builder struct which is used to build configuration for cloning a new git -/// repository. 
-pub struct RepoBuilder<'cb> { - bare: bool, - branch: Option, - local: bool, - hardlinks: bool, - checkout: Option>, - fetch_opts: Option>, -} - -/// A builder struct for configuring checkouts of a repository. -pub struct CheckoutBuilder<'cb> { - their_label: Option, - our_label: Option, - ancestor_label: Option, - target_dir: Option, - paths: Vec, - path_ptrs: Vec<*const c_char>, - file_perm: Option, - dir_perm: Option, - disable_filters: bool, - checkout_opts: u32, - progress: Option>>, - notify: Option>>, - notify_flags: CheckoutNotificationType, -} - -/// Checkout progress notification callback. -/// -/// The first argument is the path for the notification, the next is the numver -/// of completed steps so far, and the final is the total number of steps. -pub type Progress<'a> = FnMut(Option<&Path>, usize, usize) + 'a; - -/// Checkout notifications callback. -/// -/// The first argument is the notification type, the next is the path for the -/// the notification, followed by the baseline diff, target diff, and workdir diff. -/// -/// The callback must return a bool specifying whether the checkout should -/// continue. -pub type Notify<'a> = FnMut(CheckoutNotificationType, Option<&Path>, DiffFile, - DiffFile, DiffFile) -> bool + 'a; - -impl<'cb> RepoBuilder<'cb> { - /// Creates a new repository builder with all of the default configuration. - /// - /// When ready, the `clone()` method can be used to clone a new repository - /// using this configuration. - pub fn new() -> RepoBuilder<'cb> { - ::init(); - RepoBuilder { - bare: false, - branch: None, - local: true, - hardlinks: true, - checkout: None, - fetch_opts: None, - } - } - - /// Indicate whether the repository will be cloned as a bare repository or - /// not. - pub fn bare(&mut self, bare: bool) -> &mut RepoBuilder<'cb> { - self.bare = bare; - self - } - - /// Specify the name of the branch to check out after the clone. - /// - /// If not specified, the remote's default branch will be used. 
- pub fn branch(&mut self, branch: &str) -> &mut RepoBuilder<'cb> { - self.branch = Some(CString::new(branch).unwrap()); - self - } - - /// Set the flag for bypassing the git aware transport mechanism for local - /// paths. - /// - /// If `true`, the git-aware transport will be bypassed for local paths. If - /// `false`, the git-aware transport will not be bypassed. - pub fn local(&mut self, local: bool) -> &mut RepoBuilder<'cb> { - self.local = local; - self - } - - /// Set the flag for whether hardlinks are used when using a local git-aware - /// transport mechanism. - pub fn hardlinks(&mut self, links: bool) -> &mut RepoBuilder<'cb> { - self.hardlinks = links; - self - } - - /// Configure the checkout which will be performed by consuming a checkout - /// builder. - pub fn with_checkout(&mut self, checkout: CheckoutBuilder<'cb>) - -> &mut RepoBuilder<'cb> { - self.checkout = Some(checkout); - self - } - - /// Options which control the fetch, including callbacks. - /// - /// The callbacks are used for reporting fetch progress, and for acquiring - /// credentials in the event they are needed. - pub fn fetch_options(&mut self, fetch_opts: FetchOptions<'cb>) - -> &mut RepoBuilder<'cb> { - self.fetch_opts = Some(fetch_opts); - self - } - - /// Clone a remote repository. - /// - /// This will use the options configured so far to clone the specified url - /// into the specified local path. 
- pub fn clone(&mut self, url: &str, into: &Path) -> Result { - let mut opts: raw::git_clone_options = unsafe { mem::zeroed() }; - unsafe { - try_call!(raw::git_clone_init_options(&mut opts, - raw::GIT_CLONE_OPTIONS_VERSION)); - } - opts.bare = self.bare as c_int; - opts.checkout_branch = self.branch.as_ref().map(|s| { - s.as_ptr() - }).unwrap_or(0 as *const _); - - opts.local = match (self.local, self.hardlinks) { - (true, false) => raw::GIT_CLONE_LOCAL_NO_LINKS, - (false, _) => raw::GIT_CLONE_NO_LOCAL, - (true, _) => raw::GIT_CLONE_LOCAL_AUTO, - }; - opts.checkout_opts.checkout_strategy = - raw::GIT_CHECKOUT_SAFE as c_uint; - - match self.fetch_opts { - Some(ref mut cbs) => { - opts.fetch_opts = cbs.raw(); - }, - None => {} - } - - match self.checkout { - Some(ref mut c) => unsafe { c.configure(&mut opts.checkout_opts) }, - None => {} - } - - let url = try!(CString::new(url)); - let into = try!(into.into_c_string()); - let mut raw = 0 as *mut raw::git_repository; - unsafe { - try_call!(raw::git_clone(&mut raw, url, into, &opts)); - Ok(Binding::from_raw(raw)) - } - } -} - -impl<'cb> CheckoutBuilder<'cb> { - /// Creates a new builder for checkouts with all of its default - /// configuration. - pub fn new() -> CheckoutBuilder<'cb> { - ::init(); - CheckoutBuilder { - disable_filters: false, - dir_perm: None, - file_perm: None, - path_ptrs: Vec::new(), - paths: Vec::new(), - target_dir: None, - ancestor_label: None, - our_label: None, - their_label: None, - checkout_opts: raw::GIT_CHECKOUT_SAFE as u32, - progress: None, - notify: None, - notify_flags: CheckoutNotificationType::empty(), - } - } - - /// Indicate that this checkout should perform a dry run by checking for - /// conflicts but not make any actual changes. 
- pub fn dry_run(&mut self) -> &mut CheckoutBuilder<'cb> { - self.checkout_opts &= !((1 << 4) - 1); - self.checkout_opts |= raw::GIT_CHECKOUT_NONE as u32; - self - } - - /// Take any action necessary to get the working directory to match the - /// target including potentially discarding modified files. - pub fn force(&mut self) -> &mut CheckoutBuilder<'cb> { - self.checkout_opts &= !((1 << 4) - 1); - self.checkout_opts |= raw::GIT_CHECKOUT_FORCE as u32; - self - } - - /// Indicate that the checkout should be performed safely, allowing new - /// files to be created but not overwriting extisting files or changes. - /// - /// This is the default. - pub fn safe(&mut self) -> &mut CheckoutBuilder<'cb> { - self.checkout_opts &= !((1 << 4) - 1); - self.checkout_opts |= raw::GIT_CHECKOUT_SAFE as u32; - self - } - - fn flag(&mut self, bit: raw::git_checkout_strategy_t, - on: bool) -> &mut CheckoutBuilder<'cb> { - if on { - self.checkout_opts |= bit as u32; - } else { - self.checkout_opts &= !(bit as u32); - } - self - } - - /// In safe mode, create files that don't exist. - /// - /// Defaults to false. - pub fn recreate_missing(&mut self, allow: bool) -> &mut CheckoutBuilder<'cb> { - self.flag(raw::GIT_CHECKOUT_RECREATE_MISSING, allow) - } - - /// In safe mode, apply safe file updates even when there are conflicts - /// instead of canceling the checkout. - /// - /// Defaults to false. - pub fn allow_conflicts(&mut self, allow: bool) -> &mut CheckoutBuilder<'cb> { - self.flag(raw::GIT_CHECKOUT_ALLOW_CONFLICTS, allow) - } - - /// Remove untracked files from the working dir. - /// - /// Defaults to false. - pub fn remove_untracked(&mut self, remove: bool) - -> &mut CheckoutBuilder<'cb> { - self.flag(raw::GIT_CHECKOUT_REMOVE_UNTRACKED, remove) - } - - /// Remove ignored files from the working dir. - /// - /// Defaults to false. 
- pub fn remove_ignored(&mut self, remove: bool) -> &mut CheckoutBuilder<'cb> { - self.flag(raw::GIT_CHECKOUT_REMOVE_IGNORED, remove) - } - - /// Only update the contents of files that already exist. - /// - /// If set, files will not be created or deleted. - /// - /// Defaults to false. - pub fn update_only(&mut self, update: bool) -> &mut CheckoutBuilder<'cb> { - self.flag(raw::GIT_CHECKOUT_UPDATE_ONLY, update) - } - - /// Prevents checkout from writing the updated files' information to the - /// index. - /// - /// Defaults to true. - pub fn update_index(&mut self, update: bool) -> &mut CheckoutBuilder<'cb> { - self.flag(raw::GIT_CHECKOUT_DONT_UPDATE_INDEX, !update) - } - - /// Indicate whether the index and git attributes should be refreshed from - /// disk before any operations. - /// - /// Defaults to true, - pub fn refresh(&mut self, refresh: bool) -> &mut CheckoutBuilder<'cb> { - self.flag(raw::GIT_CHECKOUT_NO_REFRESH, !refresh) - } - - /// Skip files with unmerged index entries. - /// - /// Defaults to false. - pub fn skip_unmerged(&mut self, skip: bool) -> &mut CheckoutBuilder<'cb> { - self.flag(raw::GIT_CHECKOUT_SKIP_UNMERGED, skip) - } - - /// Indicate whether the checkout should proceed on conflicts by using the - /// stage 2 version of the file ("ours"). - /// - /// Defaults to false. - pub fn use_ours(&mut self, ours: bool) -> &mut CheckoutBuilder<'cb> { - self.flag(raw::GIT_CHECKOUT_USE_OURS, ours) - } - - /// Indicate whether the checkout should proceed on conflicts by using the - /// stage 3 version of the file ("theirs"). - /// - /// Defaults to false. - pub fn use_theirs(&mut self, theirs: bool) -> &mut CheckoutBuilder<'cb> { - self.flag(raw::GIT_CHECKOUT_USE_THEIRS, theirs) - } - - /// Indicate whether ignored files should be overwritten during the checkout. - /// - /// Defaults to true. 
- pub fn overwrite_ignored(&mut self, overwrite: bool) - -> &mut CheckoutBuilder<'cb> { - self.flag(raw::GIT_CHECKOUT_DONT_OVERWRITE_IGNORED, !overwrite) - } - - /// Indicate whether a normal merge file should be written for conflicts. - /// - /// Defaults to false. - pub fn conflict_style_merge(&mut self, on: bool) - -> &mut CheckoutBuilder<'cb> { - self.flag(raw::GIT_CHECKOUT_CONFLICT_STYLE_MERGE, on) - } - - /// Specify for which notification types to invoke the notification - /// callback. - /// - /// Defaults to none. - pub fn notify_on(&mut self, notification_types: CheckoutNotificationType) - -> &mut CheckoutBuilder<'cb> { - self.notify_flags = notification_types; - self - } - - /// Indicates whether to include common ancestor data in diff3 format files - /// for conflicts. - /// - /// Defaults to false. - pub fn conflict_style_diff3(&mut self, on: bool) - -> &mut CheckoutBuilder<'cb> { - self.flag(raw::GIT_CHECKOUT_CONFLICT_STYLE_DIFF3, on) - } - - /// Indicate whether to apply filters like CRLF conversion. - pub fn disable_filters(&mut self, disable: bool) - -> &mut CheckoutBuilder<'cb> { - self.disable_filters = disable; - self - } - - /// Set the mode with which new directories are created. - /// - /// Default is 0755 - pub fn dir_perm(&mut self, perm: i32) -> &mut CheckoutBuilder<'cb> { - self.dir_perm = Some(perm); - self - } - - /// Set the mode with which new files are created. - /// - /// The default is 0644 or 0755 as dictated by the blob. - pub fn file_perm(&mut self, perm: i32) -> &mut CheckoutBuilder<'cb> { - self.file_perm = Some(perm); - self - } - - /// Add a path to be checked out. - /// - /// If no paths are specified, then all files are checked out. Otherwise - /// only these specified paths are checked out. 
- pub fn path(&mut self, path: T) - -> &mut CheckoutBuilder<'cb> { - let path = path.into_c_string().unwrap(); - self.path_ptrs.push(path.as_ptr()); - self.paths.push(path); - self - } - - /// Set the directory to check out to - pub fn target_dir(&mut self, dst: &Path) -> &mut CheckoutBuilder<'cb> { - self.target_dir = Some(dst.into_c_string().unwrap()); - self - } - - /// The name of the common ancestor side of conflicts - pub fn ancestor_label(&mut self, label: &str) -> &mut CheckoutBuilder<'cb> { - self.ancestor_label = Some(CString::new(label).unwrap()); - self - } - - /// The name of the common our side of conflicts - pub fn our_label(&mut self, label: &str) -> &mut CheckoutBuilder<'cb> { - self.our_label = Some(CString::new(label).unwrap()); - self - } - - /// The name of the common their side of conflicts - pub fn their_label(&mut self, label: &str) -> &mut CheckoutBuilder<'cb> { - self.their_label = Some(CString::new(label).unwrap()); - self - } - - /// Set a callback to receive notifications of checkout progress. - pub fn progress(&mut self, cb: F) -> &mut CheckoutBuilder<'cb> - where F: FnMut(Option<&Path>, usize, usize) + 'cb { - self.progress = Some(Box::new(cb) as Box>); - self - } - - /// Set a callback to receive checkout notifications. - /// - /// Callbacks are invoked prior to modifying any files on disk. - /// Returning `false` from the callback will cancel the checkout. - pub fn notify(&mut self, cb: F) -> &mut CheckoutBuilder<'cb> - where F: FnMut(CheckoutNotificationType, Option<&Path>, DiffFile, - DiffFile, DiffFile) -> bool + 'cb - { - self.notify = Some(Box::new(cb) as Box>); - self - } - - /// Configure a raw checkout options based on this configuration. - /// - /// This method is unsafe as there is no guarantee that this structure will - /// outlive the provided checkout options. 
- pub unsafe fn configure(&mut self, opts: &mut raw::git_checkout_options) { - opts.version = raw::GIT_CHECKOUT_OPTIONS_VERSION; - opts.disable_filters = self.disable_filters as c_int; - opts.dir_mode = self.dir_perm.unwrap_or(0) as c_uint; - opts.file_mode = self.file_perm.unwrap_or(0) as c_uint; - - if self.path_ptrs.len() > 0 { - opts.paths.strings = self.path_ptrs.as_ptr() as *mut _; - opts.paths.count = self.path_ptrs.len() as size_t; - } - - match self.target_dir { - Some(ref c) => opts.target_directory = c.as_ptr(), - None => {} - } - match self.ancestor_label { - Some(ref c) => opts.ancestor_label = c.as_ptr(), - None => {} - } - match self.our_label { - Some(ref c) => opts.our_label = c.as_ptr(), - None => {} - } - match self.their_label { - Some(ref c) => opts.their_label = c.as_ptr(), - None => {} - } - if self.progress.is_some() { - let f: raw::git_checkout_progress_cb = progress_cb; - opts.progress_cb = Some(f); - opts.progress_payload = self as *mut _ as *mut _; - } - if self.notify.is_some() { - let f: raw::git_checkout_notify_cb = notify_cb; - opts.notify_cb = Some(f); - opts.notify_payload = self as *mut _ as *mut _; - opts.notify_flags = self.notify_flags.bits() as c_uint; - } - opts.checkout_strategy = self.checkout_opts as c_uint; - } -} - -extern fn progress_cb(path: *const c_char, - completed: size_t, - total: size_t, - data: *mut c_void) { - panic::wrap(|| unsafe { - let payload = &mut *(data as *mut CheckoutBuilder); - let callback = match payload.progress { - Some(ref mut c) => c, - None => return, - }; - let path = if path.is_null() { - None - } else { - Some(util::bytes2path(CStr::from_ptr(path).to_bytes())) - }; - callback(path, completed as usize, total as usize) - }); -} - -extern fn notify_cb(why: raw::git_checkout_notify_t, - path: *const c_char, - baseline: *const raw::git_diff_file, - target: *const raw::git_diff_file, - workdir: *const raw::git_diff_file, - data: *mut c_void) -> c_int { - // pack callback etc - panic::wrap(|| 
unsafe { - let payload = &mut *(data as *mut CheckoutBuilder); - let callback = match payload.notify { - Some(ref mut c) => c, - None => return 0, - }; - let path = if path.is_null() { - None - } else { - Some(util::bytes2path(CStr::from_ptr(path).to_bytes())) - }; - - let why = CheckoutNotificationType::from_bits_truncate(why as u32); - let keep_going = callback(why, - path, - DiffFile::from_raw(baseline), - DiffFile::from_raw(target), - DiffFile::from_raw(workdir)); - if keep_going {0} else {1} - }).unwrap_or(2) -} - -#[cfg(test)] -mod tests { - use std::fs; - use std::path::Path; - use tempdir::TempDir; - use super::RepoBuilder; - use Repository; - - #[test] - fn smoke() { - let r = RepoBuilder::new().clone("/path/to/nowhere", Path::new("foo")); - assert!(r.is_err()); - } - - #[test] - fn smoke2() { - let td = TempDir::new("test").unwrap(); - Repository::init_bare(&td.path().join("bare")).unwrap(); - let url = if cfg!(unix) { - format!("file://{}/bare", td.path().display()) - } else { - format!("file:///{}/bare", td.path().display().to_string() - .replace("\\", "/")) - }; - - let dst = td.path().join("foo"); - RepoBuilder::new().clone(&url, &dst).unwrap(); - fs::remove_dir_all(&dst).unwrap(); - RepoBuilder::new().local(false).clone(&url, &dst).unwrap(); - fs::remove_dir_all(&dst).unwrap(); - RepoBuilder::new().local(false).hardlinks(false).bare(true) - .clone(&url, &dst).unwrap(); - fs::remove_dir_all(&dst).unwrap(); - assert!(RepoBuilder::new().branch("foo") - .clone(&url, &dst).is_err()); - } - -} diff -Nru cargo-0.17.0/vendor/git2-0.6.3/src/call.rs cargo-0.19.0/vendor/git2-0.6.3/src/call.rs --- cargo-0.17.0/vendor/git2-0.6.3/src/call.rs 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/git2-0.6.3/src/call.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,217 +0,0 @@ -#![macro_use] -use libc; - -use Error; - -macro_rules! call { - (raw::$p:ident ($($e:expr),*)) => ( - raw::$p($(::call::convert(&$e)),*) - ) -} - -macro_rules! 
try_call { - (raw::$p:ident ($($e:expr),*)) => ({ - match ::call::try(raw::$p($(::call::convert(&$e)),*)) { - Ok(o) => o, - Err(e) => { ::panic::check(); return Err(e) } - } - }) -} - -macro_rules! try_call_iter { - ($($f:tt)*) => { - match call!($($f)*) { - 0 => {} - raw::GIT_ITEROVER => return None, - e => return Some(Err(::call::last_error(e))) - } - } -} - -#[doc(hidden)] -pub trait Convert { - fn convert(&self) -> T; -} - -pub fn convert>(u: &U) -> T { u.convert() } - -pub fn try(ret: libc::c_int) -> Result { - match ret { - n if n < 0 => Err(last_error(n)), - n => Ok(n), - } -} - -pub fn last_error(code: libc::c_int) -> Error { - // Apparently libgit2 isn't necessarily guaranteed to set the last error - // whenever a function returns a negative value! - Error::last_error(code).unwrap_or_else(|| { - Error::from_str("an unknown error occurred") - }) -} - -mod impls { - use std::ffi::CString; - use libc; - - use {raw, ConfigLevel, ResetType, ObjectType, BranchType, Direction}; - use {DiffFormat, FileFavor, SubmoduleIgnore, AutotagOption, FetchPrune}; - use call::Convert; - - impl Convert for T { - fn convert(&self) -> T { *self } - } - - impl Convert for bool { - fn convert(&self) -> libc::c_int { *self as libc::c_int } - } - impl<'a, T> Convert<*const T> for &'a T { - fn convert(&self) -> *const T { *self as *const T } - } - impl<'a, T> Convert<*mut T> for &'a mut T { - fn convert(&self) -> *mut T { &**self as *const T as *mut T } - } - impl Convert<*const T> for *mut T { - fn convert(&self) -> *const T { *self as *const T } - } - - impl Convert<*const libc::c_char> for CString { - fn convert(&self) -> *const libc::c_char { self.as_ptr() } - } - - impl> Convert<*const T> for Option { - fn convert(&self) -> *const T { - self.as_ref().map(|s| s.convert()).unwrap_or(0 as *const _) - } - } - - impl> Convert<*mut T> for Option { - fn convert(&self) -> *mut T { - self.as_ref().map(|s| s.convert()).unwrap_or(0 as *mut _) - } - } - - impl Convert for ResetType { - fn 
convert(&self) -> raw::git_reset_t { - match *self { - ResetType::Soft => raw::GIT_RESET_SOFT, - ResetType::Hard => raw::GIT_RESET_HARD, - ResetType::Mixed => raw::GIT_RESET_MIXED, - } - } - } - - impl Convert for Direction { - fn convert(&self) -> raw::git_direction { - match *self { - Direction::Push => raw::GIT_DIRECTION_PUSH, - Direction::Fetch => raw::GIT_DIRECTION_FETCH, - } - } - } - - impl Convert for ObjectType { - fn convert(&self) -> raw::git_otype { - match *self { - ObjectType::Any => raw::GIT_OBJ_ANY, - ObjectType::Commit => raw::GIT_OBJ_COMMIT, - ObjectType::Tree => raw::GIT_OBJ_TREE, - ObjectType::Blob => raw::GIT_OBJ_BLOB, - ObjectType::Tag => raw::GIT_OBJ_TAG, - } - } - } - - impl Convert for Option { - fn convert(&self) -> raw::git_otype { - self.unwrap_or(ObjectType::Any).convert() - } - } - - impl Convert for BranchType { - fn convert(&self) -> raw::git_branch_t { - match *self { - BranchType::Remote => raw::GIT_BRANCH_REMOTE, - BranchType::Local => raw::GIT_BRANCH_LOCAL, - } - } - } - - impl Convert for Option { - fn convert(&self) -> raw::git_branch_t { - self.map(|s| s.convert()).unwrap_or(raw::GIT_BRANCH_ALL) - } - } - - impl Convert for ConfigLevel { - fn convert(&self) -> raw::git_config_level_t { - match *self { - ConfigLevel::ProgramData => raw::GIT_CONFIG_LEVEL_PROGRAMDATA, - ConfigLevel::System => raw::GIT_CONFIG_LEVEL_SYSTEM, - ConfigLevel::XDG => raw::GIT_CONFIG_LEVEL_XDG, - ConfigLevel::Global => raw::GIT_CONFIG_LEVEL_GLOBAL, - ConfigLevel::Local => raw::GIT_CONFIG_LEVEL_LOCAL, - ConfigLevel::App => raw::GIT_CONFIG_LEVEL_APP, - ConfigLevel::Highest => raw::GIT_CONFIG_HIGHEST_LEVEL, - } - } - } - - impl Convert for DiffFormat { - fn convert(&self) -> raw::git_diff_format_t { - match *self { - DiffFormat::Patch => raw::GIT_DIFF_FORMAT_PATCH, - DiffFormat::PatchHeader => raw::GIT_DIFF_FORMAT_PATCH_HEADER, - DiffFormat::Raw => raw::GIT_DIFF_FORMAT_RAW, - DiffFormat::NameOnly => raw::GIT_DIFF_FORMAT_NAME_ONLY, - DiffFormat::NameStatus 
=> raw::GIT_DIFF_FORMAT_NAME_STATUS, - } - } - } - - impl Convert for FileFavor { - fn convert(&self) -> raw::git_merge_file_favor_t { - match *self { - FileFavor::Normal => raw::GIT_MERGE_FILE_FAVOR_NORMAL, - FileFavor::Ours => raw::GIT_MERGE_FILE_FAVOR_OURS, - FileFavor::Theirs => raw::GIT_MERGE_FILE_FAVOR_THEIRS, - FileFavor::Union => raw::GIT_MERGE_FILE_FAVOR_UNION, - } - } - } - - impl Convert for SubmoduleIgnore { - fn convert(&self) -> raw::git_submodule_ignore_t { - match *self { - SubmoduleIgnore::Unspecified => - raw::GIT_SUBMODULE_IGNORE_UNSPECIFIED, - SubmoduleIgnore::None => raw::GIT_SUBMODULE_IGNORE_NONE, - SubmoduleIgnore::Untracked => raw::GIT_SUBMODULE_IGNORE_UNTRACKED, - SubmoduleIgnore::Dirty => raw::GIT_SUBMODULE_IGNORE_DIRTY, - SubmoduleIgnore::All => raw::GIT_SUBMODULE_IGNORE_ALL, - } - } - } - - impl Convert for AutotagOption { - fn convert(&self) -> raw::git_remote_autotag_option_t { - match *self { - AutotagOption::Unspecified => - raw::GIT_REMOTE_DOWNLOAD_TAGS_UNSPECIFIED, - AutotagOption::None => raw::GIT_REMOTE_DOWNLOAD_TAGS_NONE, - AutotagOption::Auto => raw::GIT_REMOTE_DOWNLOAD_TAGS_AUTO, - AutotagOption::All => raw::GIT_REMOTE_DOWNLOAD_TAGS_ALL, - } - } - } - - impl Convert for FetchPrune { - fn convert(&self) -> raw::git_fetch_prune_t { - match *self { - FetchPrune::Unspecified => raw::GIT_FETCH_PRUNE_UNSPECIFIED, - FetchPrune::On => raw::GIT_FETCH_PRUNE, - FetchPrune::Off => raw::GIT_FETCH_NO_PRUNE, - } - } - } -} diff -Nru cargo-0.17.0/vendor/git2-0.6.3/src/cert.rs cargo-0.19.0/vendor/git2-0.6.3/src/cert.rs --- cargo-0.17.0/vendor/git2-0.6.3/src/cert.rs 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/git2-0.6.3/src/cert.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,97 +0,0 @@ -//! Certificate types which are passed to `CertificateCheck` in -//! `RemoteCallbacks`. 
- -use std::marker; -use std::mem; -use std::slice; - -use raw; -use util::Binding; - -/// A certificate for a remote connection, viewable as one of `CertHostkey` or -/// `CertX509` currently. -pub struct Cert<'a> { - raw: *mut raw::git_cert, - _marker: marker::PhantomData<&'a raw::git_cert>, -} - -/// Hostkey information taken from libssh2 -pub struct CertHostkey<'a> { - raw: *mut raw::git_cert_hostkey, - _marker: marker::PhantomData<&'a raw::git_cert>, -} - -/// X.509 certificate information -pub struct CertX509<'a> { - raw: *mut raw::git_cert_x509, - _marker: marker::PhantomData<&'a raw::git_cert>, -} - -impl<'a> Cert<'a> { - /// Attempt to view this certificate as an SSH hostkey. - /// - /// Returns `None` if this is not actually an SSH hostkey. - pub fn as_hostkey(&self) -> Option<&CertHostkey<'a>> { - self.cast(raw::GIT_CERT_HOSTKEY_LIBSSH2) - } - - /// Attempt to view this certificate as an X.509 certificate. - /// - /// Returns `None` if this is not actually an X.509 certificate. - pub fn as_x509(&self) -> Option<&CertX509<'a>> { - self.cast(raw::GIT_CERT_X509) - } - - fn cast(&self, kind: raw::git_cert_t) -> Option<&T> { - assert_eq!(mem::size_of::>(), mem::size_of::()); - unsafe { - if kind == (*self.raw).cert_type { - Some(&*(self as *const Cert<'a> as *const T)) - } else { - None - } - } - } -} - -impl<'a> CertHostkey<'a> { - /// Returns the md5 hash of the hostkey, if available. - pub fn hash_md5(&self) -> Option<&[u8; 16]> { - unsafe { - if (*self.raw).kind as u32 & raw::GIT_CERT_SSH_MD5 as u32 == 0 { - None - } else { - Some(&(*self.raw).hash_md5) - } - } - } - - /// Returns the SHA-1 hash of the hostkey, if available. 
- pub fn hash_sha1(&self) -> Option<&[u8; 20]> { - unsafe { - if (*self.raw).kind as u32 & raw::GIT_CERT_SSH_SHA1 as u32 == 0 { - None - } else { - Some(&(*self.raw).hash_sha1) - } - } - } -} - -impl<'a> CertX509<'a> { - /// Return the X.509 certificate data as a byte slice - pub fn data(&self) -> &[u8] { - unsafe { - slice::from_raw_parts((*self.raw).data as *const u8, - (*self.raw).len as usize) - } - } -} - -impl<'a> Binding for Cert<'a> { - type Raw = *mut raw::git_cert; - unsafe fn from_raw(raw: *mut raw::git_cert) -> Cert<'a> { - Cert { raw: raw, _marker: marker::PhantomData } - } - fn raw(&self) -> *mut raw::git_cert { self.raw } -} diff -Nru cargo-0.17.0/vendor/git2-0.6.3/src/commit.rs cargo-0.19.0/vendor/git2-0.6.3/src/commit.rs --- cargo-0.17.0/vendor/git2-0.6.3/src/commit.rs 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/git2-0.6.3/src/commit.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,340 +0,0 @@ -use std::marker; -use std::mem; -use std::ops::Range; -use std::str; -use libc; - -use {raw, signature, Oid, Error, Signature, Tree, Time, Object}; -use util::Binding; - -/// A structure to represent a git [commit][1] -/// -/// [1]: http://git-scm.com/book/en/Git-Internals-Git-Objects -pub struct Commit<'repo> { - raw: *mut raw::git_commit, - _marker: marker::PhantomData>, -} - -/// An iterator over the parent commits of a commit. -pub struct Parents<'commit, 'repo: 'commit> { - range: Range, - commit: &'commit Commit<'repo>, -} - -/// An iterator over the parent commits' ids of a commit. -pub struct ParentIds<'commit> { - range: Range, - commit: &'commit Commit<'commit>, -} - -impl<'repo> Commit<'repo> { - /// Get the id (SHA1) of a repository commit - pub fn id(&self) -> Oid { - unsafe { Binding::from_raw(raw::git_commit_id(&*self.raw)) } - } - - /// Get the id of the tree pointed to by this commit. - /// - /// No attempts are made to fetch an object from the ODB. 
- pub fn tree_id(&self) -> Oid { - unsafe { Binding::from_raw(raw::git_commit_tree_id(&*self.raw)) } - } - - /// Get the tree pointed to by a commit. - pub fn tree(&self) -> Result, Error> { - let mut ret = 0 as *mut raw::git_tree; - unsafe { - try_call!(raw::git_commit_tree(&mut ret, &*self.raw)); - Ok(Binding::from_raw(ret)) - } - } - - /// Get access to the underlying raw pointer. - pub fn raw(&self) -> *mut raw::git_commit { self.raw } - - /// Get the full message of a commit. - /// - /// The returned message will be slightly prettified by removing any - /// potential leading newlines. - /// - /// `None` will be returned if the message is not valid utf-8 - pub fn message(&self) -> Option<&str> { - str::from_utf8(self.message_bytes()).ok() - } - - /// Get the full message of a commit as a byte slice. - /// - /// The returned message will be slightly prettified by removing any - /// potential leading newlines. - pub fn message_bytes(&self) -> &[u8] { - unsafe { - ::opt_bytes(self, raw::git_commit_message(&*self.raw)).unwrap() - } - } - - /// Get the encoding for the message of a commit, as a string representing a - /// standard encoding name. - /// - /// `None` will be returned if the encoding is not known - pub fn message_encoding(&self) -> Option<&str> { - let bytes = unsafe { - ::opt_bytes(self, raw::git_commit_message(&*self.raw)) - }; - bytes.map(|b| str::from_utf8(b).unwrap()) - } - - /// Get the full raw message of a commit. - /// - /// `None` will be returned if the message is not valid utf-8 - pub fn message_raw(&self) -> Option<&str> { - str::from_utf8(self.message_raw_bytes()).ok() - } - - /// Get the full raw message of a commit. - pub fn message_raw_bytes(&self) -> &[u8] { - unsafe { - ::opt_bytes(self, raw::git_commit_message_raw(&*self.raw)).unwrap() - } - } - - /// Get the full raw text of the commit header. 
- /// - /// `None` will be returned if the message is not valid utf-8 - pub fn raw_header(&self) -> Option<&str> { - str::from_utf8(self.raw_header_bytes()).ok() - } - - /// Get the full raw text of the commit header. - pub fn raw_header_bytes(&self) -> &[u8] { - unsafe { - ::opt_bytes(self, raw::git_commit_raw_header(&*self.raw)).unwrap() - } - } - - /// Get the short "summary" of the git commit message. - /// - /// The returned message is the summary of the commit, comprising the first - /// paragraph of the message with whitespace trimmed and squashed. - /// - /// `None` may be returned if an error occurs or if the summary is not valid - /// utf-8. - pub fn summary(&mut self) -> Option<&str> { - self.summary_bytes().and_then(|s| str::from_utf8(s).ok()) - } - - /// Get the short "summary" of the git commit message. - /// - /// The returned message is the summary of the commit, comprising the first - /// paragraph of the message with whitespace trimmed and squashed. - /// - /// `None` may be returned if an error occurs - pub fn summary_bytes(&mut self) -> Option<&[u8]> { - unsafe { ::opt_bytes(self, raw::git_commit_summary(self.raw)) } - } - - /// Get the commit time (i.e. committer time) of a commit. - /// - /// The first element of the tuple is the time, in seconds, since the epoch. - /// The second element is the offset, in minutes, of the time zone of the - /// committer's preferred time zone. - pub fn time(&self) -> Time { - unsafe { - Time::new(raw::git_commit_time(&*self.raw) as i64, - raw::git_commit_time_offset(&*self.raw) as i32) - } - } - - /// Creates a new iterator over the parents of this commit. - pub fn parents<'a>(&'a self) -> Parents<'a, 'repo> { - let max = unsafe { raw::git_commit_parentcount(&*self.raw) as usize }; - Parents { range: 0..max, commit: self } - } - - /// Creates a new iterator over the parents of this commit. 
- pub fn parent_ids(&self) -> ParentIds { - let max = unsafe { raw::git_commit_parentcount(&*self.raw) as usize }; - ParentIds { range: 0..max, commit: self } - } - - /// Get the author of this commit. - pub fn author(&self) -> Signature { - unsafe { - let ptr = raw::git_commit_author(&*self.raw); - signature::from_raw_const(self, ptr) - } - } - - /// Get the committer of this commit. - pub fn committer(&self) -> Signature { - unsafe { - let ptr = raw::git_commit_committer(&*self.raw); - signature::from_raw_const(self, ptr) - } - } - - /// Amend this existing commit with all non-`None` values - /// - /// This creates a new commit that is exactly the same as the old commit, - /// except that any non-`None` values will be updated. The new commit has - /// the same parents as the old commit. - /// - /// For information about `update_ref`, see `new`. - pub fn amend(&self, - update_ref: Option<&str>, - author: Option<&Signature>, - committer: Option<&Signature>, - message_encoding: Option<&str>, - message: Option<&str>, - tree: Option<&Tree<'repo>>) -> Result { - let mut raw = raw::git_oid { id: [0; raw::GIT_OID_RAWSZ] }; - let update_ref = try!(::opt_cstr(update_ref)); - let encoding = try!(::opt_cstr(message_encoding)); - let message = try!(::opt_cstr(message)); - unsafe { - try_call!(raw::git_commit_amend(&mut raw, - self.raw(), - update_ref, - author.map(|s| s.raw()), - committer.map(|s| s.raw()), - encoding, - message, - tree.map(|t| t.raw()))); - Ok(Binding::from_raw(&raw as *const _)) - } - } - - /// Get the specified parent of the commit. - /// - /// Use the `parents` iterator to return an iterator over all parents. - pub fn parent(&self, i: usize) -> Result, Error> { - unsafe { - let mut raw = 0 as *mut raw::git_commit; - try_call!(raw::git_commit_parent(&mut raw, &*self.raw, - i as libc::c_uint)); - Ok(Binding::from_raw(raw)) - } - } - - /// Get the specified parent id of the commit. 
- /// - /// This is different from `parent`, which will attempt to load the - /// parent commit from the ODB. - /// - /// Use the `parent_ids` iterator to return an iterator over all parents. - pub fn parent_id(&self, i: usize) -> Result { - unsafe { - let id = raw::git_commit_parent_id(self.raw, i as libc::c_uint); - if id.is_null() { - Err(Error::from_str("parent index out of bounds")) - } else { - Ok(Binding::from_raw(id)) - } - } - } - - /// Casts this Commit to be usable as an `Object` - pub fn as_object(&self) -> &Object<'repo> { - unsafe { - &*(self as *const _ as *const Object<'repo>) - } - } - - /// Consumes Commit to be returned as an `Object` - pub fn into_object(self) -> Object<'repo> { - assert_eq!(mem::size_of_val(&self), mem::size_of::()); - unsafe { - mem::transmute(self) - } - } -} - -impl<'repo> Binding for Commit<'repo> { - type Raw = *mut raw::git_commit; - unsafe fn from_raw(raw: *mut raw::git_commit) -> Commit<'repo> { - Commit { - raw: raw, - _marker: marker::PhantomData, - } - } - fn raw(&self) -> *mut raw::git_commit { self.raw } -} - - -impl<'repo, 'commit> Iterator for Parents<'commit, 'repo> { - type Item = Commit<'repo>; - fn next(&mut self) -> Option> { - self.range.next().map(|i| self.commit.parent(i).unwrap()) - } - fn size_hint(&self) -> (usize, Option) { self.range.size_hint() } -} - -impl<'repo, 'commit> DoubleEndedIterator for Parents<'commit, 'repo> { - fn next_back(&mut self) -> Option> { - self.range.next_back().map(|i| self.commit.parent(i).unwrap()) - } -} - -impl<'repo, 'commit> ExactSizeIterator for Parents<'commit, 'repo> {} - -impl<'commit> Iterator for ParentIds<'commit> { - type Item = Oid; - fn next(&mut self) -> Option { - self.range.next().map(|i| self.commit.parent_id(i).unwrap()) - } - fn size_hint(&self) -> (usize, Option) { self.range.size_hint() } -} - -impl<'commit> DoubleEndedIterator for ParentIds<'commit> { - fn next_back(&mut self) -> Option { - self.range.next_back().map(|i|
self.commit.parent_id(i).unwrap()) - } -} - -impl<'commit> ExactSizeIterator for ParentIds<'commit> {} - -impl<'repo> Drop for Commit<'repo> { - fn drop(&mut self) { - unsafe { raw::git_commit_free(self.raw) } - } -} - -#[cfg(test)] -mod tests { - #[test] - fn smoke() { - let (_td, repo) = ::test::repo_init(); - let head = repo.head().unwrap(); - let target = head.target().unwrap(); - let mut commit = repo.find_commit(target).unwrap(); - assert_eq!(commit.message(), Some("initial")); - assert_eq!(commit.id(), target); - commit.message_raw().unwrap(); - commit.raw_header().unwrap(); - commit.message_encoding(); - commit.summary().unwrap(); - commit.tree_id(); - commit.tree().unwrap(); - assert_eq!(commit.parents().count(), 0); - - assert_eq!(commit.author().name(), Some("name")); - assert_eq!(commit.author().email(), Some("email")); - assert_eq!(commit.committer().name(), Some("name")); - assert_eq!(commit.committer().email(), Some("email")); - - let sig = repo.signature().unwrap(); - let tree = repo.find_tree(commit.tree_id()).unwrap(); - let id = repo.commit(Some("HEAD"), &sig, &sig, "bar", &tree, - &[&commit]).unwrap(); - let head = repo.find_commit(id).unwrap(); - - let new_head = head.amend(Some("HEAD"), None, None, None, - Some("new message"), None).unwrap(); - let new_head = repo.find_commit(new_head).unwrap(); - assert_eq!(new_head.message(), Some("new message")); - new_head.into_object(); - - repo.find_object(target, None).unwrap().as_commit().unwrap(); - repo.find_object(target, None).unwrap().into_commit().ok().unwrap(); - } -} - diff -Nru cargo-0.17.0/vendor/git2-0.6.3/src/config.rs cargo-0.19.0/vendor/git2-0.6.3/src/config.rs --- cargo-0.17.0/vendor/git2-0.6.3/src/config.rs 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/git2-0.6.3/src/config.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,607 +0,0 @@ -use std::ffi::CString; -use std::marker; -use std::path::{Path, PathBuf}; -use std::str; -use libc; - -use {raw, Error, ConfigLevel, Buf, 
IntoCString}; -use util::{self, Binding}; - -/// A structure representing a git configuration key/value store -pub struct Config { - raw: *mut raw::git_config, -} - -/// A struct representing a certain entry owned by a `Config` instance. -/// -/// An entry has a name, a value, and a level it applies to. -pub struct ConfigEntry<'cfg> { - raw: *mut raw::git_config_entry, - _marker: marker::PhantomData<&'cfg Config>, - owned: bool, -} - -/// An iterator over the `ConfigEntry` values of a `Config` structure. -pub struct ConfigEntries<'cfg> { - raw: *mut raw::git_config_iterator, - _marker: marker::PhantomData<&'cfg Config>, -} - -impl Config { - /// Allocate a new configuration object - /// - /// This object is empty, so you have to add a file to it before you can do - /// anything with it. - pub fn new() -> Result { - ::init(); - let mut raw = 0 as *mut raw::git_config; - unsafe { - try_call!(raw::git_config_new(&mut raw)); - Ok(Binding::from_raw(raw)) - } - } - - /// Create a new config instance containing a single on-disk file - pub fn open(path: &Path) -> Result { - ::init(); - let mut raw = 0 as *mut raw::git_config; - let path = try!(path.into_c_string()); - unsafe { - try_call!(raw::git_config_open_ondisk(&mut raw, path)); - Ok(Binding::from_raw(raw)) - } - } - - /// Open the global, XDG and system configuration files - /// - /// Utility wrapper that finds the global, XDG and system configuration - /// files and opens them into a single prioritized config object that can - /// be used when accessing default config data outside a repository. - pub fn open_default() -> Result { - ::init(); - let mut raw = 0 as *mut raw::git_config; - unsafe { - try_call!(raw::git_config_open_default(&mut raw)); - Ok(Binding::from_raw(raw)) - } - } - - /// Locate the path to the global configuration file - /// - /// The user or global configuration file is usually located in - /// `$HOME/.gitconfig`. 
- /// - /// This method will try to guess the full path to that file, if the file - /// exists. The returned path may be used on any method call to load - /// the global configuration file. - /// - /// This method will not guess the path to the xdg compatible config file - /// (`.config/git/config`). - pub fn find_global() -> Result { - ::init(); - let buf = Buf::new(); - unsafe { try_call!(raw::git_config_find_global(buf.raw())); } - Ok(util::bytes2path(&buf).to_path_buf()) - } - - /// Locate the path to the system configuration file - /// - /// If /etc/gitconfig doesn't exist, it will look for %PROGRAMFILES% - pub fn find_system() -> Result { - ::init(); - let buf = Buf::new(); - unsafe { try_call!(raw::git_config_find_system(buf.raw())); } - Ok(util::bytes2path(&buf).to_path_buf()) - } - - /// Locate the path to the global xdg compatible configuration file - /// - /// The xdg compatible configuration file is usually located in - /// `$HOME/.config/git/config`. - pub fn find_xdg() -> Result { - ::init(); - let buf = Buf::new(); - unsafe { try_call!(raw::git_config_find_xdg(buf.raw())); } - Ok(util::bytes2path(&buf).to_path_buf()) - } - - /// Add an on-disk config file instance to an existing config - /// - /// The on-disk file pointed at by path will be opened and parsed; it's - /// expected to be a native Git config file following the default Git config - /// syntax (see man git-config). - /// - /// Further queries on this config object will access each of the config - /// file instances in order (instances with a higher priority level will be - /// accessed first). - pub fn add_file(&mut self, path: &Path, level: ConfigLevel, - force: bool) -> Result<(), Error> { - let path = try!(path.into_c_string()); - unsafe { - try_call!(raw::git_config_add_file_ondisk(self.raw, path, level, - force)); - Ok(()) - } - } - - /// Delete a config variable from the config file with the highest level - /// (usually the local one). 
- pub fn remove(&mut self, name: &str) -> Result<(), Error> { - let name = try!(CString::new(name)); - unsafe { - try_call!(raw::git_config_delete_entry(self.raw, name)); - Ok(()) - } - } - - /// Get the value of a boolean config variable. - /// - /// All config files will be looked into, in the order of their defined - /// level. A higher level means a higher priority. The first occurrence of - /// the variable will be returned here. - pub fn get_bool(&self, name: &str) -> Result { - let mut out = 0 as libc::c_int; - let name = try!(CString::new(name)); - unsafe { - try_call!(raw::git_config_get_bool(&mut out, &*self.raw, name)); - - } - Ok(if out == 0 {false} else {true}) - } - - /// Get the value of an integer config variable. - /// - /// All config files will be looked into, in the order of their defined - /// level. A higher level means a higher priority. The first occurrence of - /// the variable will be returned here. - pub fn get_i32(&self, name: &str) -> Result { - let mut out = 0i32; - let name = try!(CString::new(name)); - unsafe { - try_call!(raw::git_config_get_int32(&mut out, &*self.raw, name)); - - } - Ok(out) - } - - /// Get the value of an integer config variable. - /// - /// All config files will be looked into, in the order of their defined - /// level. A higher level means a higher priority. The first occurrence of - /// the variable will be returned here. - pub fn get_i64(&self, name: &str) -> Result { - let mut out = 0i64; - let name = try!(CString::new(name)); - unsafe { - try_call!(raw::git_config_get_int64(&mut out, &*self.raw, name)); - } - Ok(out) - } - - /// Get the value of a string config variable. - /// - /// This is the same as `get_bytes` except that it may return `Err` if - /// the bytes are not valid utf-8. 
- pub fn get_str(&self, name: &str) -> Result<&str, Error> { - str::from_utf8(try!(self.get_bytes(name))).map_err(|_| { - Error::from_str("configuration value is not valid utf8") - }) - } - - /// Get the value of a string config variable as a byte slice. - /// - /// This method will return an error if this `Config` is not a snapshot. - pub fn get_bytes(&self, name: &str) -> Result<&[u8], Error> { - let mut ret = 0 as *const libc::c_char; - let name = try!(CString::new(name)); - unsafe { - try_call!(raw::git_config_get_string(&mut ret, &*self.raw, name)); - Ok(::opt_bytes(self, ret).unwrap()) - } - } - - /// Get the value of a string config variable as an owned string. - /// - /// An error will be returned if the config value is not valid utf-8. - pub fn get_string(&self, name: &str) -> Result { - let ret = Buf::new(); - let name = try!(CString::new(name)); - unsafe { - try_call!(raw::git_config_get_string_buf(ret.raw(), self.raw, name)); - } - str::from_utf8(&ret).map(|s| s.to_string()).map_err(|_| { - Error::from_str("configuration value is not valid utf8") - }) - } - - /// Get the value of a path config variable as an owned . - pub fn get_path(&self, name: &str) -> Result { - let ret = Buf::new(); - let name = try!(CString::new(name)); - unsafe { - try_call!(raw::git_config_get_path(ret.raw(), self.raw, name)); - } - Ok(::util::bytes2path(&ret).to_path_buf()) - } - - /// Get the ConfigEntry for a config variable. - pub fn get_entry(&self, name: &str) -> Result { - let mut ret = 0 as *mut raw::git_config_entry; - let name = try!(CString::new(name)); - unsafe { - try_call!(raw::git_config_get_entry(&mut ret, self.raw, name)); - Ok(Binding::from_raw(ret)) - } - } - - /// Iterate over all the config variables - /// - /// If `glob` is `Some`, then the iterator will only iterate over all - /// variables whose name matches the pattern. 
- /// - /// # Example - /// - /// ``` - /// # #![allow(unstable)] - /// use git2::Config; - /// - /// let cfg = Config::new().unwrap(); - /// - /// for entry in &cfg.entries(None).unwrap() { - /// let entry = entry.unwrap(); - /// println!("{} => {}", entry.name().unwrap(), entry.value().unwrap()); - /// } - /// ``` - pub fn entries(&self, glob: Option<&str>) -> Result { - let mut ret = 0 as *mut raw::git_config_iterator; - unsafe { - match glob { - Some(s) => { - let s = try!(CString::new(s)); - try_call!(raw::git_config_iterator_glob_new(&mut ret, - &*self.raw, - s)); - } - None => { - try_call!(raw::git_config_iterator_new(&mut ret, &*self.raw)); - } - } - Ok(Binding::from_raw(ret)) - } - } - - /// Open the global/XDG configuration file according to git's rules - /// - /// Git allows you to store your global configuration at `$HOME/.config` or - /// `$XDG_CONFIG_HOME/git/config`. For backwards compatability, the XDG file - /// shouldn't be used unless the use has created it explicitly. With this - /// function you'll open the correct one to write to. - pub fn open_global(&mut self) -> Result { - let mut raw = 0 as *mut raw::git_config; - unsafe { - try_call!(raw::git_config_open_global(&mut raw, self.raw)); - Ok(Binding::from_raw(raw)) - } - } - - /// Build a single-level focused config object from a multi-level one. - /// - /// The returned config object can be used to perform get/set/delete - /// operations on a single specific level. - pub fn open_level(&self, level: ConfigLevel) -> Result { - let mut raw = 0 as *mut raw::git_config; - unsafe { - try_call!(raw::git_config_open_level(&mut raw, &*self.raw, level)); - Ok(Binding::from_raw(raw)) - } - } - - /// Set the value of a boolean config variable in the config file with the - /// highest level (usually the local one). 
- pub fn set_bool(&mut self, name: &str, value: bool) -> Result<(), Error> { - let name = try!(CString::new(name)); - unsafe { - try_call!(raw::git_config_set_bool(self.raw, name, value)); - } - Ok(()) - } - - /// Set the value of an integer config variable in the config file with the - /// highest level (usually the local one). - pub fn set_i32(&mut self, name: &str, value: i32) -> Result<(), Error> { - let name = try!(CString::new(name)); - unsafe { - try_call!(raw::git_config_set_int32(self.raw, name, value)); - } - Ok(()) - } - - /// Set the value of an integer config variable in the config file with the - /// highest level (usually the local one). - pub fn set_i64(&mut self, name: &str, value: i64) -> Result<(), Error> { - let name = try!(CString::new(name)); - unsafe { - try_call!(raw::git_config_set_int64(self.raw, name, value)); - } - Ok(()) - } - - /// Set the value of an multivar config variable in the config file with the - /// highest level (usually the local one). - pub fn set_multivar(&mut self, name: &str, regexp: &str, value: &str) -> Result<(), Error> { - let name = try!(CString::new(name)); - let regexp = try!(CString::new(regexp)); - let value = try!(CString::new(value)); - unsafe { - try_call!(raw::git_config_set_multivar(self.raw, name, regexp, value)); - } - Ok(()) - } - - /// Set the value of a string config variable in the config file with the - /// highest level (usually the local one). - pub fn set_str(&mut self, name: &str, value: &str) -> Result<(), Error> { - let name = try!(CString::new(name)); - let value = try!(CString::new(value)); - unsafe { - try_call!(raw::git_config_set_string(self.raw, name, value)); - } - Ok(()) - } - - /// Create a snapshot of the configuration - /// - /// Create a snapshot of the current state of a configuration, which allows - /// you to look into a consistent view of the configuration for looking up - /// complex values (e.g. a remote, submodule). 
- pub fn snapshot(&mut self) -> Result { - let mut ret = 0 as *mut raw::git_config; - unsafe { - try_call!(raw::git_config_snapshot(&mut ret, self.raw)); - Ok(Binding::from_raw(ret)) - } - } - - /// Parse a string as a bool. - /// Interprets "true", "yes", "on", 1, or any non-zero number as true. - /// Interprets "false", "no", "off", 0, or an empty string as false. - pub fn parse_bool(s: S) -> Result { - let s = try!(s.into_c_string()); - let mut out = 0; - ::init(); - unsafe { - try_call!(raw::git_config_parse_bool(&mut out, s)); - } - Ok(out != 0) - } - - /// Parse a string as an i32; handles suffixes like k, M, or G, and - /// multiplies by the appropriate power of 1024. - pub fn parse_i32(s: S) -> Result { - let s = try!(s.into_c_string()); - let mut out = 0; - ::init(); - unsafe { - try_call!(raw::git_config_parse_int32(&mut out, s)); - } - Ok(out) - } - - /// Parse a string as an i64; handles suffixes like k, M, or G, and - /// multiplies by the appropriate power of 1024. - pub fn parse_i64(s: S) -> Result { - let s = try!(s.into_c_string()); - let mut out = 0; - ::init(); - unsafe { - try_call!(raw::git_config_parse_int64(&mut out, s)); - } - Ok(out) - } -} - -impl Binding for Config { - type Raw = *mut raw::git_config; - unsafe fn from_raw(raw: *mut raw::git_config) -> Config { - Config { raw: raw } - } - fn raw(&self) -> *mut raw::git_config { self.raw } -} - -impl Drop for Config { - fn drop(&mut self) { - unsafe { raw::git_config_free(self.raw) } - } -} - -impl<'cfg> ConfigEntry<'cfg> { - /// Gets the name of this entry. - /// - /// May return `None` if the name is not valid utf-8 - pub fn name(&self) -> Option<&str> { str::from_utf8(self.name_bytes()).ok() } - - /// Gets the name of this entry as a byte slice. - pub fn name_bytes(&self) -> &[u8] { - unsafe { ::opt_bytes(self, (*self.raw).name).unwrap() } - } - - /// Gets the value of this entry. 
- /// - /// May return `None` if the value is not valid utf-8 - pub fn value(&self) -> Option<&str> { str::from_utf8(self.value_bytes()).ok() } - - /// Gets the value of this entry as a byte slice. - pub fn value_bytes(&self) -> &[u8] { - unsafe { ::opt_bytes(self, (*self.raw).value).unwrap() } - } - - /// Gets the configuration level of this entry. - pub fn level(&self) -> ConfigLevel { - unsafe { ConfigLevel::from_raw((*self.raw).level) } - } -} - -impl<'cfg> Binding for ConfigEntry<'cfg> { - type Raw = *mut raw::git_config_entry; - - unsafe fn from_raw(raw: *mut raw::git_config_entry) - -> ConfigEntry<'cfg> { - ConfigEntry { - raw: raw, - _marker: marker::PhantomData, - owned: true, - } - } - fn raw(&self) -> *mut raw::git_config_entry { self.raw } -} - -impl<'cfg> Binding for ConfigEntries<'cfg> { - type Raw = *mut raw::git_config_iterator; - - unsafe fn from_raw(raw: *mut raw::git_config_iterator) - -> ConfigEntries<'cfg> { - ConfigEntries { - raw: raw, - _marker: marker::PhantomData, - } - } - fn raw(&self) -> *mut raw::git_config_iterator { self.raw } -} - -// entries are only valid until the iterator is freed, so this impl is for -// `&'b T` instead of `T` to have a lifetime to tie them to. -// -// It's also not implemented for `&'b mut T` so we can have multiple entries -// (ok). 
-impl<'cfg, 'b> Iterator for &'b ConfigEntries<'cfg> { - type Item = Result, Error>; - fn next(&mut self) -> Option, Error>> { - let mut raw = 0 as *mut raw::git_config_entry; - unsafe { - try_call_iter!(raw::git_config_next(&mut raw, self.raw)); - Some(Ok(ConfigEntry { - owned: false, - raw: raw, - _marker: marker::PhantomData, - })) - } - } -} - -impl<'cfg> Drop for ConfigEntries<'cfg> { - fn drop(&mut self) { - unsafe { raw::git_config_iterator_free(self.raw) } - } -} - -impl<'cfg> Drop for ConfigEntry<'cfg> { - fn drop(&mut self) { - if self.owned { - unsafe { raw::git_config_entry_free(self.raw) } - } - } -} - -#[cfg(test)] -mod tests { - use std::fs::File; - use tempdir::TempDir; - - use Config; - - #[test] - fn smoke() { - let _cfg = Config::new().unwrap(); - let _ = Config::find_global(); - let _ = Config::find_system(); - let _ = Config::find_xdg(); - } - - #[test] - fn persisted() { - let td = TempDir::new("test").unwrap(); - let path = td.path().join("foo"); - File::create(&path).unwrap(); - - let mut cfg = Config::open(&path).unwrap(); - assert!(cfg.get_bool("foo.bar").is_err()); - cfg.set_bool("foo.k1", true).unwrap(); - cfg.set_i32("foo.k2", 1).unwrap(); - cfg.set_i64("foo.k3", 2).unwrap(); - cfg.set_str("foo.k4", "bar").unwrap(); - cfg.snapshot().unwrap(); - drop(cfg); - - let cfg = Config::open(&path).unwrap().snapshot().unwrap(); - assert_eq!(cfg.get_bool("foo.k1").unwrap(), true); - assert_eq!(cfg.get_i32("foo.k2").unwrap(), 1); - assert_eq!(cfg.get_i64("foo.k3").unwrap(), 2); - assert_eq!(cfg.get_str("foo.k4").unwrap(), "bar"); - - for entry in &cfg.entries(None).unwrap() { - let entry = entry.unwrap(); - entry.name(); - entry.value(); - entry.level(); - } - } - - #[test] - fn multivar() { - let td = TempDir::new("test").unwrap(); - let path = td.path().join("foo"); - File::create(&path).unwrap(); - - let mut cfg = Config::open(&path).unwrap(); - cfg.set_multivar("foo.bar", "^$", "baz").unwrap(); - cfg.set_multivar("foo.bar", "^$", 
"qux").unwrap(); - - let mut values: Vec = cfg.entries(None) - .unwrap() - .into_iter() - .map(|entry| entry.unwrap().value().unwrap().into()) - .collect(); - values.sort(); - assert_eq!(values, ["baz", "qux"]); - } - - #[test] - fn parse() { - assert_eq!(Config::parse_bool("").unwrap(), false); - assert_eq!(Config::parse_bool("false").unwrap(), false); - assert_eq!(Config::parse_bool("no").unwrap(), false); - assert_eq!(Config::parse_bool("off").unwrap(), false); - assert_eq!(Config::parse_bool("0").unwrap(), false); - - assert_eq!(Config::parse_bool("true").unwrap(), true); - assert_eq!(Config::parse_bool("yes").unwrap(), true); - assert_eq!(Config::parse_bool("on").unwrap(), true); - assert_eq!(Config::parse_bool("1").unwrap(), true); - assert_eq!(Config::parse_bool("42").unwrap(), true); - - assert!(Config::parse_bool(" ").is_err()); - assert!(Config::parse_bool("some-string").is_err()); - assert!(Config::parse_bool("-").is_err()); - - assert_eq!(Config::parse_i32("0").unwrap(), 0); - assert_eq!(Config::parse_i32("1").unwrap(), 1); - assert_eq!(Config::parse_i32("100").unwrap(), 100); - assert_eq!(Config::parse_i32("-1").unwrap(), -1); - assert_eq!(Config::parse_i32("-100").unwrap(), -100); - assert_eq!(Config::parse_i32("1k").unwrap(), 1024); - assert_eq!(Config::parse_i32("4k").unwrap(), 4096); - assert_eq!(Config::parse_i32("1M").unwrap(), 1048576); - assert_eq!(Config::parse_i32("1G").unwrap(), 1024*1024*1024); - - assert_eq!(Config::parse_i64("0").unwrap(), 0); - assert_eq!(Config::parse_i64("1").unwrap(), 1); - assert_eq!(Config::parse_i64("100").unwrap(), 100); - assert_eq!(Config::parse_i64("-1").unwrap(), -1); - assert_eq!(Config::parse_i64("-100").unwrap(), -100); - assert_eq!(Config::parse_i64("1k").unwrap(), 1024); - assert_eq!(Config::parse_i64("4k").unwrap(), 4096); - assert_eq!(Config::parse_i64("1M").unwrap(), 1048576); - assert_eq!(Config::parse_i64("1G").unwrap(), 1024*1024*1024); - assert_eq!(Config::parse_i64("100G").unwrap(), 
100*1024*1024*1024); - } -} diff -Nru cargo-0.17.0/vendor/git2-0.6.3/src/cred.rs cargo-0.19.0/vendor/git2-0.6.3/src/cred.rs --- cargo-0.17.0/vendor/git2-0.6.3/src/cred.rs 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/git2-0.6.3/src/cred.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,470 +0,0 @@ -use std::ffi::CString; -use std::io::Write; -use std::mem; -use std::path::Path; -use std::process::{Command, Stdio}; -use url; - -use {raw, Error, Config, IntoCString}; -use util::Binding; - -/// A structure to represent git credentials in libgit2. -pub struct Cred { - raw: *mut raw::git_cred, -} - -/// Management of the gitcredentials(7) interface. -pub struct CredentialHelper { - /// A public field representing the currently discovered username from - /// configuration. - pub username: Option, - protocol: Option, - host: Option, - url: String, - commands: Vec, -} - -impl Cred { - /// Create a "default" credential usable for Negotiate mechanisms like NTLM - /// or Kerberos authentication. - pub fn default() -> Result { - ::init(); - let mut out = 0 as *mut raw::git_cred; - unsafe { - try_call!(raw::git_cred_default_new(&mut out)); - Ok(Binding::from_raw(out)) - } - } - - /// Create a new ssh key credential object used for querying an ssh-agent. - /// - /// The username specified is the username to authenticate. - pub fn ssh_key_from_agent(username: &str) -> Result { - ::init(); - let mut out = 0 as *mut raw::git_cred; - let username = try!(CString::new(username)); - unsafe { - try_call!(raw::git_cred_ssh_key_from_agent(&mut out, username)); - Ok(Binding::from_raw(out)) - } - } - - /// Create a new passphrase-protected ssh key credential object. 
- pub fn ssh_key(username: &str, - publickey: Option<&Path>, - privatekey: &Path, - passphrase: Option<&str>) -> Result { - ::init(); - let username = try!(CString::new(username)); - let publickey = try!(::opt_cstr(publickey)); - let privatekey = try!(privatekey.into_c_string()); - let passphrase = try!(::opt_cstr(passphrase)); - let mut out = 0 as *mut raw::git_cred; - unsafe { - try_call!(raw::git_cred_ssh_key_new(&mut out, username, publickey, - privatekey, passphrase)); - Ok(Binding::from_raw(out)) - } - } - - /// Create a new plain-text username and password credential object. - pub fn userpass_plaintext(username: &str, - password: &str) -> Result { - ::init(); - let username = try!(CString::new(username)); - let password = try!(CString::new(password)); - let mut out = 0 as *mut raw::git_cred; - unsafe { - try_call!(raw::git_cred_userpass_plaintext_new(&mut out, username, - password)); - Ok(Binding::from_raw(out)) - } - } - - /// Attempt to read `credential.helper` according to gitcredentials(7) [1] - /// - /// This function will attempt to parse the user's `credential.helper` - /// configuration, invoke the necessary processes, and read off what the - /// username/password should be for a particular url. - /// - /// The returned credential type will be a username/password credential if - /// successful. - /// - /// [1]: https://www.kernel.org/pub/software/scm/git/docs/gitcredentials.html - pub fn credential_helper(config: &Config, - url: &str, - username: Option<&str>) - -> Result { - match CredentialHelper::new(url).config(config).username(username) - .execute() { - Some((username, password)) => { - Cred::userpass_plaintext(&username, &password) - } - None => Err(Error::from_str("failed to acquire username/password \ - from local configuration")) - } - } - - /// Create a credential to specify a username. - /// - /// THis is used with ssh authentication to query for the username if non is - /// specified in the url. 
- pub fn username(username: &str) -> Result { - ::init(); - let username = try!(CString::new(username)); - let mut out = 0 as *mut raw::git_cred; - unsafe { - try_call!(raw::git_cred_username_new(&mut out, username)); - Ok(Binding::from_raw(out)) - } - } - - /// Check whether a credential object contains username information. - pub fn has_username(&self) -> bool { - unsafe { raw::git_cred_has_username(self.raw) == 1 } - } - - /// Return the type of credentials that this object represents. - pub fn credtype(&self) -> raw::git_credtype_t { - unsafe { (*self.raw).credtype } - } - - /// Unwrap access to the underlying raw pointer, canceling the destructor - pub unsafe fn unwrap(mut self) -> *mut raw::git_cred { - mem::replace(&mut self.raw, 0 as *mut raw::git_cred) - } -} - -impl Binding for Cred { - type Raw = *mut raw::git_cred; - - unsafe fn from_raw(raw: *mut raw::git_cred) -> Cred { - Cred { raw: raw } - } - fn raw(&self) -> *mut raw::git_cred { self.raw } -} - -impl Drop for Cred { - fn drop(&mut self) { - if !self.raw.is_null() { - unsafe { ((*self.raw).free)(self.raw) } - } - } -} - -impl CredentialHelper { - /// Create a new credential helper object which will be used to probe git's - /// local credential configuration. - /// - /// The url specified is the namespace on which this will query credentials. - /// Invalid urls are currently ignored. - pub fn new(url: &str) -> CredentialHelper { - let mut ret = CredentialHelper { - protocol: None, - host: None, - username: None, - url: url.to_string(), - commands: Vec::new(), - }; - - // Parse out the (protocol, host) if one is available - if let Ok(url) = url::Url::parse(url) { - if let Some(url::Host::Domain(s)) = url.host() { - ret.host = Some(s.to_string()); - } - ret.protocol = Some(url.scheme().to_string()) - } - return ret; - } - - /// Set the username that this credential helper will query with. - /// - /// By default the username is `None`. 
- pub fn username(&mut self, username: Option<&str>) -> &mut CredentialHelper { - self.username = username.map(|s| s.to_string()); - self - } - - /// Query the specified configuration object to discover commands to - /// execute, usernames to query, etc. - pub fn config(&mut self, config: &Config) -> &mut CredentialHelper { - // Figure out the configured username/helper program. - // - // see http://git-scm.com/docs/gitcredentials.html#_configuration_options - // - // TODO: implement useHttpPath - if self.username.is_none() { - self.config_username(config); - } - self.config_helper(config); - self - } - - // Configure the queried username from `config` - fn config_username(&mut self, config: &Config) { - let key = self.exact_key("username"); - self.username = config.get_string(&key).ok().or_else(|| { - self.url_key("username").and_then(|s| { - config.get_string(&s).ok() - }) - }).or_else(|| { - config.get_string("credential.username").ok() - }) - } - - // Discover all `helper` directives from `config` - fn config_helper(&mut self, config: &Config) { - let exact = config.get_string(&self.exact_key("helper")); - self.add_command(exact.as_ref().ok().map(|s| &s[..])); - match self.url_key("helper") { - Some(key) => { - let url = config.get_string(&key); - self.add_command(url.as_ref().ok().map(|s| &s[..])); - } - None => {} - } - let global = config.get_string("credential.helper"); - self.add_command(global.as_ref().ok().map(|s| &s[..])); - } - - // Add a `helper` configured command to the list of commands to execute. 
- // - // see https://www.kernel.org/pub/software/scm/git/docs/technical - // /api-credentials.html#_credential_helpers - fn add_command(&mut self, cmd: Option<&str>) { - let cmd = match cmd { - Some("") | None => return, - Some(s) => s, - }; - - if cmd.starts_with("!") { - self.commands.push(cmd[1..].to_string()); - } else if cmd.starts_with("/") || cmd.starts_with("\\") || - cmd[1..].starts_with(":\\") { - self.commands.push(format!("\"{}\"", cmd)); - } else { - self.commands.push(format!("git credential-{}", cmd)); - } - } - - fn exact_key(&self, name: &str) -> String { - format!("credential.{}.{}", self.url, name) - } - - fn url_key(&self, name: &str) -> Option { - match (&self.host, &self.protocol) { - (&Some(ref host), &Some(ref protocol)) => { - Some(format!("credential.{}://{}.{}", protocol, host, name)) - } - _ => None - } - } - - /// Execute this helper, attempting to discover a username/password pair. - /// - /// All I/O errors are ignored, (to match git behavior), and this function - /// only succeeds if both a username and a password were found - pub fn execute(&self) -> Option<(String, String)> { - let mut username = self.username.clone(); - let mut password = None; - for cmd in self.commands.iter() { - let (u, p) = self.execute_cmd(&cmd, &username); - if u.is_some() && username.is_none() { - username = u; - } - if p.is_some() && password.is_none() { - password = p; - } - if username.is_some() && password.is_some() { break } - } - - match (username, password) { - (Some(u), Some(p)) => Some((u, p)), - _ => None, - } - } - - // Execute the given `cmd`, providing the appropriate variables on stdin and - // then afterwards parsing the output into the username/password on stdout. - fn execute_cmd(&self, cmd: &str, username: &Option) - -> (Option, Option) { - macro_rules! my_try( ($e:expr) => ( - match $e { Ok(e) => e, Err(..) 
=> return (None, None) } - ) ); - - let mut p = my_try!(Command::new("sh").arg("-c") - .arg(&format!("{} get", cmd)) - .stdin(Stdio::piped()) - .stdout(Stdio::piped()) - .stderr(Stdio::piped()) - .spawn()); - // Ignore write errors as the command may not actually be listening for - // stdin - { - let stdin = p.stdin.as_mut().unwrap(); - match self.protocol { - Some(ref p) => { let _ = writeln!(stdin, "protocol={}", p); } - None => {} - } - match self.host { - Some(ref p) => { let _ = writeln!(stdin, "host={}", p); } - None => {} - } - match *username { - Some(ref p) => { let _ = writeln!(stdin, "username={}", p); } - None => {} - } - } - let output = my_try!(p.wait_with_output()); - if !output.status.success() { return (None, None) } - return self.parse_output(output.stdout) - } - - // Parse the output of a command into the username/password found - fn parse_output(&self, output: Vec) -> (Option, Option) { - // Parse the output of the command, looking for username/password - let mut username = None; - let mut password = None; - for line in output.split(|t| *t == b'\n') { - let mut parts = line.splitn(2, |t| *t == b'='); - let key = parts.next().unwrap(); - let value = match parts.next() { Some(s) => s, None => continue }; - let value = match String::from_utf8(value.to_vec()) { - Ok(s) => s, - Err(..) => continue, - }; - match key { - b"username" => username = Some(value), - b"password" => password = Some(value), - _ => {} - } - } - (username, password) - } -} - -#[cfg(all(test, feature = "unstable"))] -mod test { - use std::env; - use std::fs::File; - use std::io::prelude::*; - use std::path::Path; - use tempdir::TempDir; - - use {Cred, Config, CredentialHelper, ConfigLevel}; - - macro_rules! 
cfg( ($($k:expr => $v:expr),*) => ({ - let td = TempDir::new("git2-rs").unwrap(); - let mut cfg = Config::new().unwrap(); - cfg.add_file(&td.path().join("cfg"), ConfigLevel::Highest, false).unwrap(); - $(cfg.set_str($k, $v).unwrap();)* - cfg - }) ); - - #[test] - fn smoke() { - Cred::default().unwrap(); - } - - #[test] - fn credential_helper1() { - let cfg = cfg! { - "credential.helper" => "!f() { echo username=a; echo password=b; }; f" - }; - let (u, p) = CredentialHelper::new("https://example.com/foo/bar") - .config(&cfg) - .execute().unwrap(); - assert_eq!(u, "a"); - assert_eq!(p, "b"); - } - - #[test] - fn credential_helper2() { - let cfg = cfg! {}; - assert!(CredentialHelper::new("https://example.com/foo/bar") - .config(&cfg) - .execute().is_none()); - } - - #[test] - fn credential_helper3() { - let cfg = cfg! { - "credential.https://example.com.helper" => - "!f() { echo username=c; }; f", - "credential.helper" => "!f() { echo username=a; echo password=b; }; f" - }; - let (u, p) = CredentialHelper::new("https://example.com/foo/bar") - .config(&cfg) - .execute().unwrap(); - assert_eq!(u, "c"); - assert_eq!(p, "b"); - } - - #[test] - fn credential_helper4() { - let td = TempDir::new("git2-rs").unwrap(); - let path = td.path().join("script"); - File::create(&path).unwrap().write(br"\ -#!/bin/sh -echo username=c -").unwrap(); - chmod(&path); - let cfg = cfg! 
{ - "credential.https://example.com.helper" => - &path.display().to_string()[..], - "credential.helper" => "!f() { echo username=a; echo password=b; }; f" - }; - let (u, p) = CredentialHelper::new("https://example.com/foo/bar") - .config(&cfg) - .execute().unwrap(); - assert_eq!(u, "c"); - assert_eq!(p, "b"); - } - - #[test] - fn credential_helper5() { - let td = TempDir::new("git2-rs").unwrap(); - let path = td.path().join("git-credential-script"); - File::create(&path).unwrap().write(br"\ -#!/bin/sh -echo username=c -").unwrap(); - chmod(&path); - - let paths = env::var("PATH").unwrap(); - let paths = env::split_paths(&paths) - .chain(path.parent().map(|p| p.to_path_buf()).into_iter()); - env::set_var("PATH", &env::join_paths(paths).unwrap()); - - let cfg = cfg! { - "credential.https://example.com.helper" => "script", - "credential.helper" => "!f() { echo username=a; echo password=b; }; f" - }; - let (u, p) = CredentialHelper::new("https://example.com/foo/bar") - .config(&cfg) - .execute().unwrap(); - assert_eq!(u, "c"); - assert_eq!(p, "b"); - } - - #[test] - fn credential_helper6() { - let cfg = cfg! 
{ - "credential.helper" => "" - }; - assert!(CredentialHelper::new("https://example.com/foo/bar") - .config(&cfg) - .execute().is_none()); - } - - #[cfg(unix)] - fn chmod(path: &Path) { - use std::os::unix::prelude::*; - use std::fs; - let mut perms = fs::metadata(path).unwrap().permissions(); - perms.set_mode(0o755); - fs::set_permissions(path, perms).unwrap(); - } - #[cfg(windows)] - fn chmod(_path: &Path) {} -} diff -Nru cargo-0.17.0/vendor/git2-0.6.3/src/describe.rs cargo-0.19.0/vendor/git2-0.6.3/src/describe.rs --- cargo-0.17.0/vendor/git2-0.6.3/src/describe.rs 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/git2-0.6.3/src/describe.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,186 +0,0 @@ -use std::marker; -use std::mem; -use std::ffi::CString; - -use libc::{c_uint, c_int}; - -use {raw, Repository, Error, Buf}; -use util::Binding; - -/// The result of a `describe` operation on either an `Describe` or a -/// `Repository`. -pub struct Describe<'repo> { - raw: *mut raw::git_describe_result, - _marker: marker::PhantomData<&'repo Repository>, -} - -/// Options which indicate how a `Describe` is created. -pub struct DescribeOptions { - raw: raw::git_describe_options, - pattern: CString, -} - -/// Options which can be used to customize how a description is formatted. -pub struct DescribeFormatOptions { - raw: raw::git_describe_format_options, - dirty_suffix: CString, -} - -impl<'repo> Describe<'repo> { - /// Prints this describe result, returning the result as a string. 
- pub fn format(&self, opts: Option<&DescribeFormatOptions>) - -> Result { - let buf = Buf::new(); - let opts = opts.map(|o| &o.raw as *const _).unwrap_or(0 as *const _); - unsafe { - try_call!(raw::git_describe_format(buf.raw(), self.raw, opts)); - } - Ok(String::from_utf8(buf.to_vec()).unwrap()) - } -} - -impl<'repo> Binding for Describe<'repo> { - type Raw = *mut raw::git_describe_result; - - unsafe fn from_raw(raw: *mut raw::git_describe_result) -> Describe<'repo> { - Describe { raw: raw, _marker: marker::PhantomData, } - } - fn raw(&self) -> *mut raw::git_describe_result { self.raw } -} - -impl<'repo> Drop for Describe<'repo> { - fn drop(&mut self) { - unsafe { raw::git_describe_result_free(self.raw) } - } -} - -impl DescribeFormatOptions { - /// Creates a new blank set of formatting options for a description. - pub fn new() -> DescribeFormatOptions { - let mut opts = DescribeFormatOptions { - raw: unsafe { mem::zeroed() }, - dirty_suffix: CString::new(Vec::new()).unwrap(), - }; - opts.raw.version = 1; - opts.raw.abbreviated_size = 7; - return opts - } - - /// Sets the size of the abbreviated commit id to use. - /// - /// The value is the lower bound for the length of the abbreviated string, - /// and the default is 7. - pub fn abbreviated_size(&mut self, size: u32) -> &mut Self { - self.raw.abbreviated_size = size as c_uint; - self - } - - /// Sets whether or not the long format is used even when a shorter name - /// could be used. - pub fn always_use_long_format(&mut self, long: bool) -> &mut Self { - self.raw.always_use_long_format = long as c_int; - self - } - - /// If the workdir is dirty and this is set, this string will be appended to - /// the description string. - pub fn dirty_suffix(&mut self, suffix: &str) -> &mut Self { - self.dirty_suffix = CString::new(suffix).unwrap(); - self.raw.dirty_suffix = self.dirty_suffix.as_ptr(); - self - } -} - -impl DescribeOptions { - /// Creates a new blank set of formatting options for a description. 
- pub fn new() -> DescribeOptions { - let mut opts = DescribeOptions { - raw: unsafe { mem::zeroed() }, - pattern: CString::new(Vec::new()).unwrap(), - }; - opts.raw.version = 1; - opts.raw.max_candidates_tags = 10; - return opts - } - - #[allow(missing_docs)] - pub fn max_candidates_tags(&mut self, max: u32) -> &mut Self { - self.raw.max_candidates_tags = max as c_uint; - self - } - - /// Sets the reference lookup strategy - /// - /// This behaves like the `--tags` option to git-decribe. - pub fn describe_tags(&mut self) -> &mut Self { - self.raw.describe_strategy = raw::GIT_DESCRIBE_TAGS as c_uint; - self - } - - /// Sets the reference lookup strategy - /// - /// This behaves like the `--all` option to git-decribe. - pub fn describe_all(&mut self) -> &mut Self { - self.raw.describe_strategy = raw::GIT_DESCRIBE_ALL as c_uint; - self - } - - /// Indicates when calculating the distance from the matching tag or - /// reference whether to only walk down the first-parent ancestry. - pub fn only_follow_first_parent(&mut self, follow: bool) -> &mut Self { - self.raw.only_follow_first_parent = follow as c_int; - self - } - - /// If no matching tag or reference is found whether a describe option would - /// normally fail. This option indicates, however, that it will instead fall - /// back to showing the full id of the commit. 
- pub fn show_commit_oid_as_fallback(&mut self, show: bool) -> &mut Self { - self.raw.show_commit_oid_as_fallback = show as c_int; - self - } - - #[allow(missing_docs)] - pub fn pattern(&mut self, pattern: &str) -> &mut Self { - self.pattern = CString::new(pattern).unwrap(); - self.raw.pattern = self.pattern.as_ptr(); - self - } -} - -impl Binding for DescribeOptions { - type Raw = *mut raw::git_describe_options; - - unsafe fn from_raw(_raw: *mut raw::git_describe_options) - -> DescribeOptions { - panic!("unimplemened") - } - fn raw(&self) -> *mut raw::git_describe_options { - &self.raw as *const _ as *mut _ - } -} - -#[cfg(test)] -mod tests { - use DescribeOptions; - - #[test] - fn smoke() { - let (_td, repo) = ::test::repo_init(); - let head = t!(repo.head()).target().unwrap(); - - let d = t!(repo.describe(DescribeOptions::new() - .show_commit_oid_as_fallback(true))); - let id = head.to_string(); - assert_eq!(t!(d.format(None)), &id[..7]); - - let obj = t!(repo.find_object(head, None)); - let sig = t!(repo.signature()); - t!(repo.tag("foo", &obj, &sig, "message", true)); - let d = t!(repo.describe(&DescribeOptions::new())); - assert_eq!(t!(d.format(None)), "foo"); - - let d = t!(obj.describe(&DescribeOptions::new())); - assert_eq!(t!(d.format(None)), "foo"); - } -} diff -Nru cargo-0.17.0/vendor/git2-0.6.3/src/diff.rs cargo-0.19.0/vendor/git2-0.6.3/src/diff.rs --- cargo-0.17.0/vendor/git2-0.6.3/src/diff.rs 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/git2-0.6.3/src/diff.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,1237 +0,0 @@ -use std::ffi::CString; -use std::marker; -use std::mem; -use std::ops::Range; -use std::path::Path; -use std::slice; -use libc::{c_char, size_t, c_void, c_int}; - -use {raw, panic, Buf, Delta, Oid, Repository, Error, DiffFormat}; -use {DiffStatsFormat, IntoCString}; -use util::{self, Binding}; - -/// The diff object that contains all individual file deltas. 
-/// -/// This is an opaque structure which will be allocated by one of the diff -/// generator functions on the `Repository` structure (e.g. `diff_tree_to_tree` -/// or other `diff_*` functions). -pub struct Diff<'repo> { - raw: *mut raw::git_diff, - _marker: marker::PhantomData<&'repo Repository>, -} - -unsafe impl<'repo> Send for Diff<'repo> {} - -/// Description of changes to one entry. -pub struct DiffDelta<'a> { - raw: *mut raw::git_diff_delta, - _marker: marker::PhantomData<&'a raw::git_diff_delta>, -} - -/// Description of one side of a delta. -/// -/// Although this is called a "file" it could represent a file, a symbolic -/// link, a submodule commit id, or even a tree (although that only happens if -/// you are tracking type changes or ignored/untracked directories). -pub struct DiffFile<'a> { - raw: *const raw::git_diff_file, - _marker: marker::PhantomData<&'a raw::git_diff_file>, -} - -/// Structure describing options about how the diff should be executed. -pub struct DiffOptions { - pathspec: Vec, - pathspec_ptrs: Vec<*const c_char>, - old_prefix: Option, - new_prefix: Option, - raw: raw::git_diff_options, -} - -/// Control behavior of rename and copy detection -pub struct DiffFindOptions { - raw: raw::git_diff_find_options, -} - -/// An iterator over the diffs in a delta -pub struct Deltas<'diff> { - range: Range, - diff: &'diff Diff<'diff>, -} - -/// Structure describing a line (or data span) of a diff. -pub struct DiffLine<'a> { - raw: *const raw::git_diff_line, - _marker: marker::PhantomData<&'a raw::git_diff_line>, -} - -/// Structure describing a hunk of a diff. -pub struct DiffHunk<'a> { - raw: *const raw::git_diff_hunk, - _marker: marker::PhantomData<&'a raw::git_diff_hunk>, -} - -/// Structure describing a hunk of a diff. -pub struct DiffStats { - raw: *mut raw::git_diff_stats, -} - -/// Structure describing the binary contents of a diff. 
-pub struct DiffBinary<'a> { - raw: *const raw::git_diff_binary, - _marker: marker::PhantomData<&'a raw::git_diff_binary>, -} - -/// The contents of one of the files in a binary diff. -pub struct DiffBinaryFile<'a> { - raw: *const raw::git_diff_binary_file, - _marker: marker::PhantomData<&'a raw::git_diff_binary_file>, -} - -/// When producing a binary diff, the binary data returned will be -/// either the deflated full ("literal") contents of the file, or -/// the deflated binary delta between the two sides (whichever is -/// smaller). -#[derive(Copy, Clone, Debug)] -pub enum DiffBinaryKind { - /// There is no binary delta - None, - /// The binary data is the literal contents of the file - Literal, - /// The binary data is the delta from one side to the other - Delta, -} - -type PrintCb<'a> = FnMut(DiffDelta, Option, DiffLine) -> bool + 'a; - -pub type FileCb<'a> = FnMut(DiffDelta, f32) -> bool + 'a; -pub type BinaryCb<'a> = FnMut(DiffDelta, DiffBinary) -> bool + 'a; -pub type HunkCb<'a> = FnMut(DiffDelta, DiffHunk) -> bool + 'a; -pub type LineCb<'a> = FnMut(DiffDelta, Option, DiffLine) -> bool + 'a; - -struct ForeachCallbacks<'a, 'b: 'a, 'c, 'd: 'c, 'e, 'f: 'e, 'g, 'h: 'g> { - file: &'a mut FileCb<'b>, - binary: Option<&'c mut BinaryCb<'d>>, - hunk: Option<&'e mut HunkCb<'f>>, - line: Option<&'g mut LineCb<'h>>, -} - -impl<'repo> Diff<'repo> { - /// Merge one diff into another. - /// - /// This merges items from the "from" list into the "self" list. The - /// resulting diff will have all items that appear in either list. - /// If an item appears in both lists, then it will be "merged" to appear - /// as if the old version was from the "onto" list and the new version - /// is from the "from" list (with the exception that if the item has a - /// pending DELETE in the middle, then it will show as deleted). 
- pub fn merge(&mut self, from: &Diff<'repo>) -> Result<(), Error> { - unsafe { try_call!(raw::git_diff_merge(self.raw, &*from.raw)); } - Ok(()) - } - - /// Returns an iterator over the deltas in this diff. - pub fn deltas(&self) -> Deltas { - let num_deltas = unsafe { raw::git_diff_num_deltas(&*self.raw) }; - Deltas { range: 0..(num_deltas as usize), diff: self } - } - - /// Return the diff delta for an entry in the diff list. - pub fn get_delta(&self, i: usize) -> Option { - unsafe { - let ptr = raw::git_diff_get_delta(&*self.raw, i as size_t); - Binding::from_raw_opt(ptr as *mut _) - } - } - - /// Check if deltas are sorted case sensitively or insensitively. - pub fn is_sorted_icase(&self) -> bool { - unsafe { raw::git_diff_is_sorted_icase(&*self.raw) == 1 } - } - - /// Iterate over a diff generating formatted text output. - /// - /// Returning `false` from the callback will terminate the iteration and - /// return an error from this function. - pub fn print(&self, format: DiffFormat, mut cb: F) -> Result<(), Error> - where F: FnMut(DiffDelta, - Option, - DiffLine) -> bool { - let mut cb: &mut PrintCb = &mut cb; - let ptr = &mut cb as *mut _; - unsafe { - try_call!(raw::git_diff_print(self.raw, format, print_cb, - ptr as *mut _)); - return Ok(()) - } - } - - /// Loop over all deltas in a diff issuing callbacks. - /// - /// Returning `false` from any callback will terminate the iteration and - /// return an error from this function. 
- pub fn foreach(&self, - file_cb: &mut FileCb, - binary_cb: Option<&mut BinaryCb>, - hunk_cb: Option<&mut HunkCb>, - line_cb: Option<&mut LineCb>) -> Result<(), Error> { - let mut cbs = ForeachCallbacks { - file: file_cb, - binary: binary_cb, - hunk: hunk_cb, - line: line_cb, - }; - let ptr = &mut cbs as *mut _; - unsafe { - let binary_cb_c = if cbs.binary.is_some() { - Some(binary_cb_c as raw::git_diff_binary_cb) - } else { - None - }; - let hunk_cb_c = if cbs.hunk.is_some() { - Some(hunk_cb_c as raw::git_diff_hunk_cb) - } else { - None - }; - let line_cb_c = if cbs.line.is_some() { - Some(line_cb_c as raw::git_diff_line_cb) - } else { - None - }; - try_call!(raw::git_diff_foreach(self.raw, file_cb_c, binary_cb_c, - hunk_cb_c, line_cb_c, - ptr as *mut _)); - return Ok(()) - } - } - - /// Accumulate diff statistics for all patches. - pub fn stats(&self) -> Result { - let mut ret = 0 as *mut raw::git_diff_stats; - unsafe { - try_call!(raw::git_diff_get_stats(&mut ret, self.raw)); - Ok(Binding::from_raw(ret)) - } - } - - /// Transform a diff marking file renames, copies, etc. - /// - /// This modifies a diff in place, replacing old entries that look like - /// renames or copies with new entries reflecting those changes. This also - /// will, if requested, break modified files into add/remove pairs if the - /// amount of change is above a threshold. 
- pub fn find_similar(&mut self, opts: Option<&mut DiffFindOptions>) - -> Result<(), Error> { - let opts = opts.map(|opts| &opts.raw); - unsafe { try_call!(raw::git_diff_find_similar(self.raw, opts)); } - Ok(()) - } - - // TODO: num_deltas_of_type, format_email, find_similar -} - -pub extern fn print_cb(delta: *const raw::git_diff_delta, - hunk: *const raw::git_diff_hunk, - line: *const raw::git_diff_line, - data: *mut c_void) -> c_int { - unsafe { - let delta = Binding::from_raw(delta as *mut _); - let hunk = Binding::from_raw_opt(hunk); - let line = Binding::from_raw(line); - - let r = panic::wrap(|| { - let data = data as *mut &mut PrintCb; - (*data)(delta, hunk, line) - }); - if r == Some(true) {0} else {-1} - } -} - -extern fn file_cb_c(delta: *const raw::git_diff_delta, - progress: f32, - data: *mut c_void) -> c_int { - unsafe { - let delta = Binding::from_raw(delta as *mut _); - - let r = panic::wrap(|| { - let cbs = data as *mut ForeachCallbacks; - ((*cbs).file)(delta, progress) - }); - if r == Some(true) {0} else {-1} - } -} - -extern fn binary_cb_c(delta: *const raw::git_diff_delta, - binary: *const raw::git_diff_binary, - data: *mut c_void) -> c_int { - unsafe { - let delta = Binding::from_raw(delta as *mut _); - let binary = Binding::from_raw(binary); - - let r = panic::wrap(|| { - let cbs = data as *mut ForeachCallbacks; - match (*cbs).binary { - Some(ref mut cb) => cb(delta, binary), - None => false, - } - }); - if r == Some(true) {0} else {-1} - } -} - -extern fn hunk_cb_c(delta: *const raw::git_diff_delta, - hunk: *const raw::git_diff_hunk, - data: *mut c_void) -> c_int { - unsafe { - let delta = Binding::from_raw(delta as *mut _); - let hunk = Binding::from_raw(hunk); - - let r = panic::wrap(|| { - let cbs = data as *mut ForeachCallbacks; - match (*cbs).hunk { - Some(ref mut cb) => cb(delta, hunk), - None => false, - } - }); - if r == Some(true) {0} else {-1} - } -} - -extern fn line_cb_c(delta: *const raw::git_diff_delta, - hunk: *const 
raw::git_diff_hunk, - line: *const raw::git_diff_line, - data: *mut c_void) -> c_int { - unsafe { - let delta = Binding::from_raw(delta as *mut _); - let hunk = Binding::from_raw_opt(hunk); - let line = Binding::from_raw(line); - - let r = panic::wrap(|| { - let cbs = data as *mut ForeachCallbacks; - match (*cbs).line { - Some(ref mut cb) => cb(delta, hunk, line), - None => false, - } - }); - if r == Some(true) {0} else {-1} - } -} - - -impl<'repo> Binding for Diff<'repo> { - type Raw = *mut raw::git_diff; - unsafe fn from_raw(raw: *mut raw::git_diff) -> Diff<'repo> { - Diff { - raw: raw, - _marker: marker::PhantomData, - } - } - fn raw(&self) -> *mut raw::git_diff { self.raw } -} - -impl<'repo> Drop for Diff<'repo> { - fn drop(&mut self) { - unsafe { raw::git_diff_free(self.raw) } - } -} - -impl<'a> DiffDelta<'a> { - // TODO: expose when diffs are more exposed - // pub fn similarity(&self) -> u16 { - // unsafe { (*self.raw).similarity } - // } - - /// Returns the number of files in this delta. - pub fn nfiles(&self) -> u16 { - unsafe { (*self.raw).nfiles } - } - - /// Returns the status of this entry - /// - /// For more information, see `Delta`'s documentation - pub fn status(&self) -> Delta { - match unsafe { (*self.raw).status } { - raw::GIT_DELTA_UNMODIFIED => Delta::Unmodified, - raw::GIT_DELTA_ADDED => Delta::Added, - raw::GIT_DELTA_DELETED => Delta::Deleted, - raw::GIT_DELTA_MODIFIED => Delta::Modified, - raw::GIT_DELTA_RENAMED => Delta::Renamed, - raw::GIT_DELTA_COPIED => Delta::Copied, - raw::GIT_DELTA_IGNORED => Delta::Ignored, - raw::GIT_DELTA_UNTRACKED => Delta::Untracked, - raw::GIT_DELTA_TYPECHANGE => Delta::Typechange, - raw::GIT_DELTA_UNREADABLE => Delta::Unreadable, - raw::GIT_DELTA_CONFLICTED => Delta::Conflicted, - n => panic!("unknown diff status: {}", n), - } - } - - /// Return the file which represents the "from" side of the diff. 
- /// - /// What side this means depends on the function that was used to generate - /// the diff and will be documented on the function itself. - pub fn old_file(&self) -> DiffFile<'a> { - unsafe { Binding::from_raw(&(*self.raw).old_file as *const _) } - } - - /// Return the file which represents the "to" side of the diff. - /// - /// What side this means depends on the function that was used to generate - /// the diff and will be documented on the function itself. - pub fn new_file(&self) -> DiffFile<'a> { - unsafe { Binding::from_raw(&(*self.raw).new_file as *const _) } - } -} - -impl<'a> Binding for DiffDelta<'a> { - type Raw = *mut raw::git_diff_delta; - unsafe fn from_raw(raw: *mut raw::git_diff_delta) -> DiffDelta<'a> { - DiffDelta { - raw: raw, - _marker: marker::PhantomData, - } - } - fn raw(&self) -> *mut raw::git_diff_delta { self.raw } -} - -impl<'a> DiffFile<'a> { - /// Returns the Oid of this item. - /// - /// If this entry represents an absent side of a diff (e.g. the `old_file` - /// of a `Added` delta), then the oid returned will be zeroes. - pub fn id(&self) -> Oid { - unsafe { Binding::from_raw(&(*self.raw).id as *const _) } - } - - /// Returns the path, in bytes, of the entry relative to the working - /// directory of the repository. - pub fn path_bytes(&self) -> Option<&'a [u8]> { - static FOO: () = (); - unsafe { ::opt_bytes(&FOO, (*self.raw).path) } - } - - /// Returns the path of the entry relative to the working directory of the - /// repository. 
- pub fn path(&self) -> Option<&'a Path> { - self.path_bytes().map(util::bytes2path) - } - - /// Returns the size of this entry, in bytes - pub fn size(&self) -> u64 { unsafe { (*self.raw).size as u64 } } - - // TODO: expose flags/mode -} - -impl<'a> Binding for DiffFile<'a> { - type Raw = *const raw::git_diff_file; - unsafe fn from_raw(raw: *const raw::git_diff_file) -> DiffFile<'a> { - DiffFile { - raw: raw, - _marker: marker::PhantomData, - } - } - fn raw(&self) -> *const raw::git_diff_file { self.raw } -} - -impl DiffOptions { - /// Creates a new set of empty diff options. - /// - /// All flags and other options are defaulted to false or their otherwise - /// zero equivalents. - pub fn new() -> DiffOptions { - let mut opts = DiffOptions { - pathspec: Vec::new(), - pathspec_ptrs: Vec::new(), - raw: unsafe { mem::zeroed() }, - old_prefix: None, - new_prefix: None, - }; - assert_eq!(unsafe { - raw::git_diff_init_options(&mut opts.raw, 1) - }, 0); - opts - } - - fn flag(&mut self, opt: u32, val: bool) -> &mut DiffOptions { - if val { - self.raw.flags |= opt; - } else { - self.raw.flags &= !opt; - } - self - } - - /// Flag indicating whether the sides of the diff will be reversed. - pub fn reverse(&mut self, reverse: bool) -> &mut DiffOptions { - self.flag(raw::GIT_DIFF_REVERSE, reverse) - } - - /// Flag indicating whether ignored files are included. - pub fn include_ignored(&mut self, include: bool) -> &mut DiffOptions { - self.flag(raw::GIT_DIFF_INCLUDE_IGNORED, include) - } - - /// Flag indicating whether ignored directories are traversed deeply or not. 
- pub fn recurse_ignored_dirs(&mut self, recurse: bool) -> &mut DiffOptions { - self.flag(raw::GIT_DIFF_RECURSE_IGNORED_DIRS, recurse) - } - - /// Flag indicating whether untracked files are in the diff - pub fn include_untracked(&mut self, include: bool) -> &mut DiffOptions { - self.flag(raw::GIT_DIFF_INCLUDE_UNTRACKED, include) - } - - /// Flag indicating whether untracked directories are deeply traversed or - /// not. - pub fn recurse_untracked_dirs(&mut self, recurse: bool) -> &mut DiffOptions { - self.flag(raw::GIT_DIFF_RECURSE_UNTRACKED_DIRS, recurse) - } - - /// Flag indicating whether unmodified files are in the diff. - pub fn include_unmodified(&mut self, include: bool) -> &mut DiffOptions { - self.flag(raw::GIT_DIFF_INCLUDE_UNMODIFIED, include) - } - - /// If entrabled, then Typechange delta records are generated. - pub fn include_typechange(&mut self, include: bool) -> &mut DiffOptions { - self.flag(raw::GIT_DIFF_INCLUDE_TYPECHANGE, include) - } - - /// Event with `include_typechange`, the tree treturned generally shows a - /// deleted blow. This flag correctly labels the tree transitions as a - /// typechange record with the `new_file`'s mode set to tree. - /// - /// Note that the tree SHA will not be available. - pub fn include_typechange_trees(&mut self, include: bool) -> &mut DiffOptions { - self.flag(raw::GIT_DIFF_INCLUDE_TYPECHANGE_TREES, include) - } - - /// Flag indicating whether file mode changes are ignored. - pub fn ignore_filemode(&mut self, ignore: bool) -> &mut DiffOptions { - self.flag(raw::GIT_DIFF_IGNORE_FILEMODE, ignore) - } - - /// Flag indicating whether all submodules should be treated as unmodified. - pub fn ignore_submodules(&mut self, ignore: bool) -> &mut DiffOptions { - self.flag(raw::GIT_DIFF_IGNORE_SUBMODULES, ignore) - } - - /// Flag indicating whether case insensitive filenames should be used. 
- pub fn ignore_case(&mut self, ignore: bool) -> &mut DiffOptions { - self.flag(raw::GIT_DIFF_IGNORE_CASE, ignore) - } - - /// If pathspecs are specified, this flag means that they should be applied - /// as an exact match instead of a fnmatch pattern. - pub fn disable_pathspec_match(&mut self, disable: bool) -> &mut DiffOptions { - self.flag(raw::GIT_DIFF_DISABLE_PATHSPEC_MATCH, disable) - } - - /// Disable updating the `binary` flag in delta records. This is useful when - /// iterating over a diff if you don't need hunk and data callbacks and want - /// to avoid having to load a file completely. - pub fn skip_binary_check(&mut self, skip: bool) -> &mut DiffOptions { - self.flag(raw::GIT_DIFF_SKIP_BINARY_CHECK, skip) - } - - /// When diff finds an untracked directory, to match the behavior of core - /// Git, it scans the contents for ignored and untracked files. If all - /// contents are ignored, then the directory is ignored; if any contents are - /// not ignored, then the directory is untracked. This is extra work that - /// may not matter in many cases. - /// - /// This flag turns off that scan and immediately labels an untracked - /// directory as untracked (changing the behavior to not match core git). - pub fn enable_fast_untracked_dirs(&mut self, enable: bool) - -> &mut DiffOptions { - self.flag(raw::GIT_DIFF_ENABLE_FAST_UNTRACKED_DIRS, enable) - } - - /// When diff finds a file in the working directory with stat information - /// different from the index, but the OID ends up being the same, write the - /// correct stat information into the index. Note: without this flag, diff - /// will always leave the index untouched. 
- pub fn update_index(&mut self, update: bool) -> &mut DiffOptions { - self.flag(raw::GIT_DIFF_UPDATE_INDEX, update) - } - - /// Include unreadable files in the diff - pub fn include_unreadable(&mut self, include: bool) -> &mut DiffOptions { - self.flag(raw::GIT_DIFF_INCLUDE_UNREADABLE, include) - } - - /// Include unreadable files in the diff - pub fn include_unreadable_as_untracked(&mut self, include: bool) - -> &mut DiffOptions { - self.flag(raw::GIT_DIFF_INCLUDE_UNREADABLE_AS_UNTRACKED, include) - } - - /// Treat all files as text, disabling binary attributes and detection. - pub fn force_text(&mut self, force: bool) -> &mut DiffOptions { - self.flag(raw::GIT_DIFF_FORCE_TEXT, force) - } - - /// Treat all files as binary, disabling text diffs - pub fn force_binary(&mut self, force: bool) -> &mut DiffOptions { - self.flag(raw::GIT_DIFF_FORCE_TEXT, force) - } - - /// Ignore all whitespace - pub fn ignore_whitespace(&mut self, ignore: bool) -> &mut DiffOptions { - self.flag(raw::GIT_DIFF_IGNORE_WHITESPACE, ignore) - } - - /// Ignore changes in the amount of whitespace - pub fn ignore_whitespace_change(&mut self, ignore: bool) -> &mut DiffOptions { - self.flag(raw::GIT_DIFF_IGNORE_WHITESPACE_CHANGE, ignore) - } - - /// Ignore whitespace at tend of line - pub fn ignore_whitespace_eol(&mut self, ignore: bool) -> &mut DiffOptions { - self.flag(raw::GIT_DIFF_IGNORE_WHITESPACE_EOL, ignore) - } - - /// When generating patch text, include the content of untracked files. - /// - /// This automatically turns on `include_untracked` but it does not turn on - /// `recurse_untracked_dirs`. Add that flag if you want the content of every - /// single untracked file. - pub fn show_untracked_content(&mut self, show: bool) -> &mut DiffOptions { - self.flag(raw::GIT_DIFF_SHOW_UNTRACKED_CONTENT, show) - } - - /// When generating output, include the names of unmodified files if they - /// are included in the `Diff`. Normally these are skipped in the formats - /// that list files (e.g. 
name-only, name-status, raw). Even with this these - /// will not be included in the patch format. - pub fn show_unmodified(&mut self, show: bool) -> &mut DiffOptions { - self.flag(raw::GIT_DIFF_SHOW_UNMODIFIED, show) - } - - /// Use the "patience diff" algorithm - pub fn patience(&mut self, patience: bool) -> &mut DiffOptions { - self.flag(raw::GIT_DIFF_PATIENCE, patience) - } - - /// Take extra time to find the minimal diff - pub fn minimal(&mut self, minimal: bool) -> &mut DiffOptions { - self.flag(raw::GIT_DIFF_MINIMAL, minimal) - } - - /// Include the necessary deflate/delta information so that `git-apply` can - /// apply given diff information to binary files. - pub fn show_binary(&mut self, show: bool) -> &mut DiffOptions { - self.flag(raw::GIT_DIFF_SHOW_BINARY, show) - } - - /// Set the number of unchanged lines that define the boundary of a hunk - /// (and to display before and after). - /// - /// The default value for this is 3. - pub fn context_lines(&mut self, lines: u32) -> &mut DiffOptions { - self.raw.context_lines = lines; - self - } - - /// Set the maximum number of unchanged lines between hunk boundaries before - /// the hunks will be merged into one. - /// - /// The default value for this is 0. - pub fn interhunk_lines(&mut self, lines: u32) -> &mut DiffOptions { - self.raw.interhunk_lines = lines; - self - } - - /// The default value for this is `core.abbrev` or 7 if unset. - pub fn id_abbrev(&mut self, abbrev: u16) -> &mut DiffOptions { - self.raw.id_abbrev = abbrev; - self - } - - /// Maximum size (in bytes) above which a blob will be marked as binary - /// automatically. - /// - /// A negative value will disable this entirely. - /// - /// The default value for this is 512MB. - pub fn max_size(&mut self, size: i64) -> &mut DiffOptions { - self.raw.max_size = size as raw::git_off_t; - self - } - - /// The virtual "directory" to prefix old file names with in hunk headers. - /// - /// The default value for this is "a". 
- pub fn old_prefix(&mut self, t: T) -> &mut DiffOptions { - self.old_prefix = Some(t.into_c_string().unwrap()); - self - } - - /// The virtual "directory" to prefix new file names with in hunk headers. - /// - /// The default value for this is "b". - pub fn new_prefix(&mut self, t: T) -> &mut DiffOptions { - self.new_prefix = Some(t.into_c_string().unwrap()); - self - } - - /// Add to the array of paths/fnmatch patterns to constrain the diff. - pub fn pathspec(&mut self, pathspec: T) - -> &mut DiffOptions { - let s = pathspec.into_c_string().unwrap(); - self.pathspec_ptrs.push(s.as_ptr()); - self.pathspec.push(s); - self - } - - /// Acquire a pointer to the underlying raw options. - /// - /// This function is unsafe as the pointer is only valid so long as this - /// structure is not moved, modified, or used elsewhere. - pub unsafe fn raw(&mut self) -> *const raw::git_diff_options { - self.raw.old_prefix = self.old_prefix.as_ref().map(|s| s.as_ptr()) - .unwrap_or(0 as *const _); - self.raw.new_prefix = self.new_prefix.as_ref().map(|s| s.as_ptr()) - .unwrap_or(0 as *const _); - self.raw.pathspec.count = self.pathspec_ptrs.len() as size_t; - self.raw.pathspec.strings = self.pathspec_ptrs.as_ptr() as *mut _; - &self.raw as *const _ - } - - // TODO: expose ignore_submodules, notify_cb/notify_payload -} - -impl<'diff> Iterator for Deltas<'diff> { - type Item = DiffDelta<'diff>; - fn next(&mut self) -> Option> { - self.range.next().and_then(|i| self.diff.get_delta(i)) - } - fn size_hint(&self) -> (usize, Option) { self.range.size_hint() } -} -impl<'diff> DoubleEndedIterator for Deltas<'diff> { - fn next_back(&mut self) -> Option> { - self.range.next_back().and_then(|i| self.diff.get_delta(i)) - } -} -impl<'diff> ExactSizeIterator for Deltas<'diff> {} - -impl<'a> DiffLine<'a> { - /// Line number in old file or `None` for added line - pub fn old_lineno(&self) -> Option { - match unsafe { (*self.raw).old_lineno } { - n if n < 0 => None, - n => Some(n as u32), - } - } - - 
/// Line number in new file or `None` for deleted line - pub fn new_lineno(&self) -> Option { - match unsafe { (*self.raw).new_lineno } { - n if n < 0 => None, - n => Some(n as u32), - } - } - - /// Number of newline characters in content - pub fn num_lines(&self) -> u32 { - unsafe { (*self.raw).num_lines as u32 } - } - - /// Offset in the original file to the content - pub fn content_offset(&self) -> i64 { - unsafe { (*self.raw).content_offset as i64 } - } - - /// Content of this line as bytes. - pub fn content(&self) -> &[u8] { - unsafe { - slice::from_raw_parts((*self.raw).content as *const u8, - (*self.raw).content_len as usize) - } - } - - /// Sigil showing the origin of this `DiffLine`. - /// - /// * ` ` - Line context - /// * `+` - Line addition - /// * `-` - Line deletion - /// * `=` - Context (End of file) - /// * `>` - Add (End of file) - /// * `<` - Remove (End of file) - /// * `F` - File header - /// * `H` - Hunk header - /// * `B` - Line binary - pub fn origin(&self) -> char { - match unsafe { (*self.raw).origin as raw::git_diff_line_t } { - raw::GIT_DIFF_LINE_CONTEXT => ' ', - raw::GIT_DIFF_LINE_ADDITION => '+', - raw::GIT_DIFF_LINE_DELETION => '-', - raw::GIT_DIFF_LINE_CONTEXT_EOFNL => '=', - raw::GIT_DIFF_LINE_ADD_EOFNL => '>', - raw::GIT_DIFF_LINE_DEL_EOFNL => '<', - raw::GIT_DIFF_LINE_FILE_HDR => 'F', - raw::GIT_DIFF_LINE_HUNK_HDR => 'H', - raw::GIT_DIFF_LINE_BINARY => 'B', - _ => ' ', - } - } -} - -impl<'a> Binding for DiffLine<'a> { - type Raw = *const raw::git_diff_line; - unsafe fn from_raw(raw: *const raw::git_diff_line) -> DiffLine<'a> { - DiffLine { - raw: raw, - _marker: marker::PhantomData, - } - } - fn raw(&self) -> *const raw::git_diff_line { self.raw } -} - -impl<'a> DiffHunk<'a> { - /// Starting line number in old_file - pub fn old_start(&self) -> u32 { - unsafe { (*self.raw).old_start as u32 } - } - - /// Number of lines in old_file - pub fn old_lines(&self) -> u32 { - unsafe { (*self.raw).old_lines as u32 } - } - - /// Starting line 
number in new_file - pub fn new_start(&self) -> u32 { - unsafe { (*self.raw).new_start as u32 } - } - - /// Number of lines in new_file - pub fn new_lines(&self) -> u32 { - unsafe { (*self.raw).new_lines as u32 } - } - - /// Header text - pub fn header(&self) -> &[u8] { - unsafe { - slice::from_raw_parts((*self.raw).header.as_ptr() as *const u8, - (*self.raw).header_len as usize) - } - } -} - -impl<'a> Binding for DiffHunk<'a> { - type Raw = *const raw::git_diff_hunk; - unsafe fn from_raw(raw: *const raw::git_diff_hunk) -> DiffHunk<'a> { - DiffHunk { - raw: raw, - _marker: marker::PhantomData, - } - } - fn raw(&self) -> *const raw::git_diff_hunk { self.raw } -} - -impl DiffStats { - /// Get the total number of files chaned in a diff. - pub fn files_changed(&self) -> usize { - unsafe { raw::git_diff_stats_files_changed(&*self.raw) as usize } - } - - /// Get the total number of insertions in a diff - pub fn insertions(&self) -> usize { - unsafe { raw::git_diff_stats_insertions(&*self.raw) as usize } - } - - /// Get the total number of deletions in a diff - pub fn deletions(&self) -> usize { - unsafe { raw::git_diff_stats_deletions(&*self.raw) as usize } - } - - /// Print diff statistics to a Buf - pub fn to_buf(&self, format: DiffStatsFormat, width: usize) - -> Result { - let buf = Buf::new(); - unsafe { - try_call!(raw::git_diff_stats_to_buf(buf.raw(), self.raw, - format.bits(), - width as size_t)); - } - Ok(buf) - } -} - -impl Binding for DiffStats { - type Raw = *mut raw::git_diff_stats; - - unsafe fn from_raw(raw: *mut raw::git_diff_stats) -> DiffStats { - DiffStats { raw: raw } - } - fn raw(&self) -> *mut raw::git_diff_stats { self.raw } -} - -impl Drop for DiffStats { - fn drop(&mut self) { - unsafe { raw::git_diff_stats_free(self.raw) } - } -} - -impl<'a> DiffBinary<'a> { - /// Returns whether there is data in this binary structure or not. - /// - /// If this is `true`, then this was produced and included binary content. 
- /// If this is `false` then this was generated knowing only that a binary - /// file changed but without providing the data, probably from a patch that - /// said `Binary files a/file.txt and b/file.txt differ`. - pub fn contains_data(&self) -> bool { - unsafe { (*self.raw).contains_data == 1 } - } - - /// The contents of the old file. - pub fn old_file(&self) -> DiffBinaryFile<'a> { - unsafe { Binding::from_raw(&(*self.raw).old_file as *const _) } - } - - /// The contents of the new file. - pub fn new_file(&self) -> DiffBinaryFile<'a> { - unsafe { Binding::from_raw(&(*self.raw).new_file as *const _) } - } -} - -impl<'a> Binding for DiffBinary<'a> { - type Raw = *const raw::git_diff_binary; - unsafe fn from_raw(raw: *const raw::git_diff_binary) -> DiffBinary<'a> { - DiffBinary { - raw: raw, - _marker: marker::PhantomData, - } - } - fn raw(&self) -> *const raw::git_diff_binary { self.raw } -} - -impl<'a> DiffBinaryFile<'a> { - /// The type of binary data for this file - pub fn kind(&self) -> DiffBinaryKind { - unsafe { Binding::from_raw((*self.raw).kind) } - } - - /// The binary data, deflated - pub fn data(&self) -> &[u8] { - unsafe { - slice::from_raw_parts((*self.raw).data as *const u8, - (*self.raw).datalen as usize) - } - } - - /// The length of the binary data after inflation - pub fn inflated_len(&self) -> usize { - unsafe { (*self.raw).inflatedlen as usize } - } - -} - -impl<'a> Binding for DiffBinaryFile<'a> { - type Raw = *const raw::git_diff_binary_file; - unsafe fn from_raw(raw: *const raw::git_diff_binary_file) -> DiffBinaryFile<'a> { - DiffBinaryFile { - raw: raw, - _marker: marker::PhantomData, - } - } - fn raw(&self) -> *const raw::git_diff_binary_file { self.raw } -} - -impl Binding for DiffBinaryKind { - type Raw = raw::git_diff_binary_t; - unsafe fn from_raw(raw: raw::git_diff_binary_t) -> DiffBinaryKind { - match raw { - raw::GIT_DIFF_BINARY_NONE => DiffBinaryKind::None, - raw::GIT_DIFF_BINARY_LITERAL => DiffBinaryKind::Literal, - 
raw::GIT_DIFF_BINARY_DELTA => DiffBinaryKind::Delta, - _ => panic!("Unknown git diff binary kind"), - } - } - fn raw(&self) -> raw::git_diff_binary_t { - match *self { - DiffBinaryKind::None => raw::GIT_DIFF_BINARY_NONE, - DiffBinaryKind::Literal => raw::GIT_DIFF_BINARY_LITERAL, - DiffBinaryKind::Delta => raw::GIT_DIFF_BINARY_DELTA, - } - } -} - -impl DiffFindOptions { - /// Creates a new set of empty diff find options. - /// - /// All flags and other options are defaulted to false or their otherwise - /// zero equivalents. - pub fn new() -> DiffFindOptions { - let mut opts = DiffFindOptions { - raw: unsafe { mem::zeroed() }, - }; - assert_eq!(unsafe { - raw::git_diff_find_init_options(&mut opts.raw, 1) - }, 0); - opts - } - - fn flag(&mut self, opt: u32, val: bool) -> &mut DiffFindOptions { - if val { - self.raw.flags |= opt; - } else { - self.raw.flags &= !opt; - } - self - } - - /// Reset all flags back to their unset state, indicating that - /// `diff.renames` should be used instead. This is overridden once any flag - /// is set. - pub fn by_config(&mut self) -> &mut DiffFindOptions { - self.flag(0xffffffff, false) - } - - /// Look for renames? - pub fn renames(&mut self, find: bool) -> &mut DiffFindOptions { - self.flag(raw::GIT_DIFF_FIND_RENAMES, find) - } - - /// Consider old side of modified for renames? - pub fn renames_from_rewrites(&mut self, find: bool) -> &mut DiffFindOptions { - self.flag(raw::GIT_DIFF_FIND_RENAMES_FROM_REWRITES, find) - } - - /// Look for copies? - pub fn copies(&mut self, find: bool) -> &mut DiffFindOptions { - self.flag(raw::GIT_DIFF_FIND_COPIES, find) - } - - /// Consider unmodified as copy sources? - /// - /// For this to work correctly, use `include_unmodified` when the initial - /// diff is being generated. - pub fn copies_from_unmodified(&mut self, find: bool) - -> &mut DiffFindOptions { - self.flag(raw::GIT_DIFF_FIND_COPIES_FROM_UNMODIFIED, find) - } - - /// Mark significant rewrites for split. 
- pub fn rewrites(&mut self, find: bool) -> &mut DiffFindOptions { - self.flag(raw::GIT_DIFF_FIND_REWRITES, find) - } - - /// Actually split large rewrites into delete/add pairs - pub fn break_rewrites(&mut self, find: bool) -> &mut DiffFindOptions { - self.flag(raw::GIT_DIFF_BREAK_REWRITES, find) - } - - #[doc(hidden)] - pub fn break_rewries(&mut self, find: bool) -> &mut DiffFindOptions { - self.break_rewrites(find) - } - - /// Find renames/copies for untracked items in working directory. - /// - /// For this to work correctly use the `include_untracked` option when the - /// initial diff is being generated. - pub fn for_untracked(&mut self, find: bool) -> &mut DiffFindOptions { - self.flag(raw::GIT_DIFF_FIND_FOR_UNTRACKED, find) - } - - /// Turn on all finding features. - pub fn all(&mut self, find: bool) -> &mut DiffFindOptions { - self.flag(raw::GIT_DIFF_FIND_ALL, find) - } - - /// Measure similarity ignoring leading whitespace (default) - pub fn ignore_leading_whitespace(&mut self, ignore: bool) - -> &mut DiffFindOptions { - self.flag(raw::GIT_DIFF_FIND_IGNORE_LEADING_WHITESPACE, ignore) - } - - /// Measure similarity ignoring all whitespace - pub fn ignore_whitespace(&mut self, ignore: bool) -> &mut DiffFindOptions { - self.flag(raw::GIT_DIFF_FIND_IGNORE_WHITESPACE, ignore) - } - - /// Measure similarity including all data - pub fn dont_ignore_whitespace(&mut self, dont: bool) -> &mut DiffFindOptions { - self.flag(raw::GIT_DIFF_FIND_DONT_IGNORE_WHITESPACE, dont) - } - - /// Measure similarity only by comparing SHAs (fast and cheap) - pub fn exact_match_only(&mut self, exact: bool) -> &mut DiffFindOptions { - self.flag(raw::GIT_DIFF_FIND_EXACT_MATCH_ONLY, exact) - } - - /// Do not break rewrites unless they contribute to a rename. - /// - /// Normally, `break_rewrites` and `rewrites` will measure the - /// self-similarity of modified files and split the ones that have changed a - /// lot into a delete/add pair. 
Then the sides of that pair will be - /// considered candidates for rename and copy detection - /// - /// If you add this flag in and the split pair is not used for an actual - /// rename or copy, then the modified record will be restored to a regular - /// modified record instead of being split. - pub fn break_rewrites_for_renames_only(&mut self, b: bool) - -> &mut DiffFindOptions { - self.flag(raw::GIT_DIFF_BREAK_REWRITES_FOR_RENAMES_ONLY, b) - } - - /// Remove any unmodified deltas after find_similar is done. - /// - /// Using `copies_from_unmodified` to emulate the `--find-copies-harder` - /// behavior requires building a diff with the `include_unmodified` flag. If - /// you do not want unmodified records in the final result, pas this flag to - /// have them removed. - pub fn remove_unmodified(&mut self, remove: bool) -> &mut DiffFindOptions { - self.flag(raw::GIT_DIFF_FIND_REMOVE_UNMODIFIED, remove) - } - - /// Similarity to consider a file renamed (default 50) - pub fn rename_threshold(&mut self, thresh: u16) -> &mut DiffFindOptions { - self.raw.rename_threshold = thresh; - self - } - - /// Similarity of modified to be glegible rename source (default 50) - pub fn rename_from_rewrite_threshold(&mut self, thresh: u16) - -> &mut DiffFindOptions { - self.raw.rename_from_rewrite_threshold = thresh; - self - } - - /// Similarity to consider a file copy (default 50) - pub fn copy_threshold(&mut self, thresh: u16) -> &mut DiffFindOptions { - self.raw.copy_threshold = thresh; - self - } - - /// Similarity to split modify into delete/add pair (default 60) - pub fn break_rewrite_threshold(&mut self, thresh: u16) - -> &mut DiffFindOptions { - self.raw.break_rewrite_threshold = thresh; - self - } - - /// Maximum similarity sources to examine for a file (somewhat like - /// git-diff's `-l` option or `diff.renameLimit` config) - /// - /// Defaults to 200 - pub fn rename_limit(&mut self, limit: usize) -> &mut DiffFindOptions { - self.raw.rename_limit = limit as size_t; - 
self - } - - // TODO: expose git_diff_similarity_metric -} - -#[cfg(test)] -mod tests { - use DiffOptions; - use std::fs::File; - use std::path::Path; - use std::borrow::Borrow; - use std::io::Write; - - #[test] - fn smoke() { - let (_td, repo) = ::test::repo_init(); - let diff = repo.diff_tree_to_workdir(None, None).unwrap(); - assert_eq!(diff.deltas().len(), 0); - let stats = diff.stats().unwrap(); - assert_eq!(stats.insertions(), 0); - assert_eq!(stats.deletions(), 0); - assert_eq!(stats.files_changed(), 0); - } - - #[test] - fn foreach_smoke() { - let (_td, repo) = ::test::repo_init(); - let diff = t!(repo.diff_tree_to_workdir(None, None)); - let mut count = 0; - t!(diff.foreach(&mut |_file, _progress| { count = count + 1; true }, - None, None, None)); - assert_eq!(count, 0); - } - - #[test] - fn foreach_file_only() { - let path = Path::new("foo"); - let (td, repo) = ::test::repo_init(); - t!(t!(File::create(&td.path().join(path))).write_all(b"bar")); - let mut opts = DiffOptions::new(); - opts.include_untracked(true); - let diff = t!(repo.diff_tree_to_workdir(None, Some(&mut opts))); - let mut count = 0; - let mut result = None; - t!(diff.foreach(&mut |file, _progress| { - count = count + 1; - result = file.new_file().path().map(ToOwned::to_owned); - true - }, None, None, None)); - assert_eq!(result.as_ref().map(Borrow::borrow), Some(path)); - assert_eq!(count, 1); - } - - #[test] - fn foreach_file_and_hunk() { - let path = Path::new("foo"); - let (td, repo) = ::test::repo_init(); - t!(t!(File::create(&td.path().join(path))).write_all(b"bar")); - let mut index = t!(repo.index()); - t!(index.add_path(path)); - let mut opts = DiffOptions::new(); - opts.include_untracked(true); - let diff = t!(repo.diff_tree_to_index(None, Some(&index), - Some(&mut opts))); - let mut new_lines = 0; - t!(diff.foreach( - &mut |_file, _progress| { true }, - None, - Some(&mut |_file, hunk| { - new_lines = hunk.new_lines(); - true - }), - None)); - assert_eq!(new_lines, 1); - } - - 
#[test] - fn foreach_all_callbacks() { - let fib = vec![0, 1, 1, 2, 3, 5, 8]; - // Verified with a node implementation of deflate, might be worth - // adding a deflate lib to do this inline here. - let deflated_fib = vec![120, 156, 99, 96, 100, 100, 98, 102, 229, 0, 0, - 0, 53, 0, 21]; - let foo_path = Path::new("foo"); - let bin_path = Path::new("bin"); - let (td, repo) = ::test::repo_init(); - t!(t!(File::create(&td.path().join(foo_path))).write_all(b"bar\n")); - t!(t!(File::create(&td.path().join(bin_path))).write_all(&fib)); - let mut index = t!(repo.index()); - t!(index.add_path(foo_path)); - t!(index.add_path(bin_path)); - let mut opts = DiffOptions::new(); - opts.include_untracked(true).show_binary(true); - let diff = t!(repo.diff_tree_to_index(None, Some(&index), - Some(&mut opts))); - let mut bin_content = None; - let mut new_lines = 0; - let mut line_content = None; - t!(diff.foreach( - &mut |_file, _progress| { true }, - Some(&mut |_file, binary| { - bin_content = Some(binary.new_file().data().to_owned()); - true - }), - Some(&mut |_file, hunk| { - new_lines = hunk.new_lines(); - true - }), - Some(&mut |_file, _hunk, line| { - line_content = String::from_utf8(line.content().into()).ok(); - true - }))); - assert_eq!(bin_content, Some(deflated_fib)); - assert_eq!(new_lines, 1); - assert_eq!(line_content, Some("bar\n".to_string())); - } -} diff -Nru cargo-0.17.0/vendor/git2-0.6.3/src/error.rs cargo-0.19.0/vendor/git2-0.6.3/src/error.rs --- cargo-0.17.0/vendor/git2-0.6.3/src/error.rs 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/git2-0.6.3/src/error.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,237 +0,0 @@ -use std::env::JoinPathsError; -use std::ffi::{CStr, NulError}; -use std::error; -use std::fmt; -use std::str; -use libc::c_int; - -use {raw, ErrorClass, ErrorCode}; - -/// A structure to represent errors coming out of libgit2. 
-#[derive(Debug,PartialEq)] -pub struct Error { - code: c_int, - klass: c_int, - message: String, -} - -impl Error { - /// Returns the last error, or `None` if one is not available. - pub fn last_error(code: c_int) -> Option { - ::init(); - unsafe { - let ptr = raw::giterr_last(); - if ptr.is_null() { - None - } else { - Some(Error::from_raw(code, ptr)) - } - } - } - - unsafe fn from_raw(code: c_int, ptr: *const raw::git_error) -> Error { - let msg = CStr::from_ptr((*ptr).message as *const _).to_bytes(); - let msg = str::from_utf8(msg).unwrap(); - Error { code: code, klass: (*ptr).klass, message: msg.to_string() } - } - - /// Creates a new error from the given string as the error. - pub fn from_str(s: &str) -> Error { - Error { - code: raw::GIT_ERROR as c_int, - klass: raw::GITERR_NONE as c_int, - message: s.to_string(), - } - } - - /// Return the error code associated with this error. - pub fn code(&self) -> ErrorCode { - match self.raw_code() { - raw::GIT_OK => super::ErrorCode::GenericError, - raw::GIT_ERROR => super::ErrorCode::GenericError, - raw::GIT_ENOTFOUND => super::ErrorCode::NotFound, - raw::GIT_EEXISTS => super::ErrorCode::Exists, - raw::GIT_EAMBIGUOUS => super::ErrorCode::Ambiguous, - raw::GIT_EBUFS => super::ErrorCode::BufSize, - raw::GIT_EUSER => super::ErrorCode::User, - raw::GIT_EBAREREPO => super::ErrorCode::BareRepo, - raw::GIT_EUNBORNBRANCH => super::ErrorCode::UnbornBranch, - raw::GIT_EUNMERGED => super::ErrorCode::Unmerged, - raw::GIT_ENONFASTFORWARD => super::ErrorCode::NotFastForward, - raw::GIT_EINVALIDSPEC => super::ErrorCode::InvalidSpec, - raw::GIT_ECONFLICT => super::ErrorCode::Conflict, - raw::GIT_ELOCKED => super::ErrorCode::Locked, - raw::GIT_EMODIFIED => super::ErrorCode::Modified, - raw::GIT_PASSTHROUGH => super::ErrorCode::GenericError, - raw::GIT_ITEROVER => super::ErrorCode::GenericError, - raw::GIT_EAUTH => super::ErrorCode::Auth, - raw::GIT_ECERTIFICATE => super::ErrorCode::Certificate, - raw::GIT_EAPPLIED => 
super::ErrorCode::Applied, - raw::GIT_EPEEL => super::ErrorCode::Peel, - raw::GIT_EEOF => super::ErrorCode::Eof, - raw::GIT_EINVALID => super::ErrorCode::Invalid, - raw::GIT_EUNCOMMITTED => super::ErrorCode::Uncommitted, - raw::GIT_EDIRECTORY => super::ErrorCode::Directory, - _ => super::ErrorCode::GenericError, - } - } - - /// Return the error class associated with this error. - pub fn class(&self) -> ErrorClass { - match self.raw_class() { - raw::GITERR_NONE => super::ErrorClass::None, - raw::GITERR_NOMEMORY => super::ErrorClass::NoMemory, - raw::GITERR_OS => super::ErrorClass::Os, - raw::GITERR_INVALID => super::ErrorClass::Invalid, - raw::GITERR_REFERENCE => super::ErrorClass::Reference, - raw::GITERR_ZLIB => super::ErrorClass::Zlib, - raw::GITERR_REPOSITORY => super::ErrorClass::Repository, - raw::GITERR_CONFIG => super::ErrorClass::Config, - raw::GITERR_REGEX => super::ErrorClass::Regex, - raw::GITERR_ODB => super::ErrorClass::Odb, - raw::GITERR_INDEX => super::ErrorClass::Index, - raw::GITERR_OBJECT => super::ErrorClass::Object, - raw::GITERR_NET => super::ErrorClass::Net, - raw::GITERR_TAG => super::ErrorClass::Tag, - raw::GITERR_TREE => super::ErrorClass::Tree, - raw::GITERR_INDEXER => super::ErrorClass::Indexer, - raw::GITERR_SSL => super::ErrorClass::Ssl, - raw::GITERR_SUBMODULE => super::ErrorClass::Submodule, - raw::GITERR_THREAD => super::ErrorClass::Thread, - raw::GITERR_STASH => super::ErrorClass::Stash, - raw::GITERR_CHECKOUT => super::ErrorClass::Checkout, - raw::GITERR_FETCHHEAD => super::ErrorClass::FetchHead, - raw::GITERR_MERGE => super::ErrorClass::Merge, - raw::GITERR_SSH => super::ErrorClass::Ssh, - raw::GITERR_FILTER => super::ErrorClass::Filter, - raw::GITERR_REVERT => super::ErrorClass::Revert, - raw::GITERR_CALLBACK => super::ErrorClass::Callback, - raw::GITERR_CHERRYPICK => super::ErrorClass::CherryPick, - raw::GITERR_DESCRIBE => super::ErrorClass::Describe, - raw::GITERR_REBASE => super::ErrorClass::Rebase, - raw::GITERR_FILESYSTEM => 
super::ErrorClass::Filesystem, - _ => super::ErrorClass::None, - } - } - - /// Return the raw error code associated with this error. - pub fn raw_code(&self) -> raw::git_error_code { - macro_rules! check( ($($e:ident,)*) => ( - $(if self.code == raw::$e as c_int { raw::$e }) else * - else { - raw::GIT_ERROR - } - ) ); - check!( - GIT_OK, - GIT_ERROR, - GIT_ENOTFOUND, - GIT_EEXISTS, - GIT_EAMBIGUOUS, - GIT_EBUFS, - GIT_EUSER, - GIT_EBAREREPO, - GIT_EUNBORNBRANCH, - GIT_EUNMERGED, - GIT_ENONFASTFORWARD, - GIT_EINVALIDSPEC, - GIT_ECONFLICT, - GIT_ELOCKED, - GIT_EMODIFIED, - GIT_EAUTH, - GIT_ECERTIFICATE, - GIT_EAPPLIED, - GIT_EPEEL, - GIT_EEOF, - GIT_EINVALID, - GIT_EUNCOMMITTED, - GIT_PASSTHROUGH, - GIT_ITEROVER, - ) - } - - /// Return the raw error class associated with this error. - pub fn raw_class(&self) -> raw::git_error_t { - macro_rules! check( ($($e:ident,)*) => ( - $(if self.klass == raw::$e as c_int { raw::$e }) else * - else { - raw::GITERR_NONE - } - ) ); - check!( - GITERR_NONE, - GITERR_NOMEMORY, - GITERR_OS, - GITERR_INVALID, - GITERR_REFERENCE, - GITERR_ZLIB, - GITERR_REPOSITORY, - GITERR_CONFIG, - GITERR_REGEX, - GITERR_ODB, - GITERR_INDEX, - GITERR_OBJECT, - GITERR_NET, - GITERR_TAG, - GITERR_TREE, - GITERR_INDEXER, - GITERR_SSL, - GITERR_SUBMODULE, - GITERR_THREAD, - GITERR_STASH, - GITERR_CHECKOUT, - GITERR_FETCHHEAD, - GITERR_MERGE, - GITERR_SSH, - GITERR_FILTER, - GITERR_REVERT, - GITERR_CALLBACK, - GITERR_CHERRYPICK, - GITERR_DESCRIBE, - GITERR_REBASE, - GITERR_FILESYSTEM, - ) - } - - /// Return the message associated with this error - pub fn message(&self) -> &str { &self.message } -} - -impl error::Error for Error { - fn description(&self) -> &str { &self.message } -} - -impl fmt::Display for Error { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - try!(write!(f, "[{}/{}] ", self.klass, self.code)); - f.write_str(&self.message) - } -} - -impl From for Error { - fn from(_: NulError) -> Error { - Error::from_str("data contained a nul 
byte that could not be \ - represented as a string") - } -} - -impl From for Error { - fn from(e: JoinPathsError) -> Error { - Error::from_str(error::Error::description(&e)) - } -} - - -#[cfg(test)] -mod tests { - use {ErrorClass, ErrorCode}; - - #[test] - fn smoke() { - let (_td, repo) = ::test::repo_init(); - - let err = repo.find_submodule("does_not_exist").err().unwrap(); - assert_eq!(err.code(), ErrorCode::NotFound); - assert_eq!(err.class(), ErrorClass::Submodule); - } -} diff -Nru cargo-0.17.0/vendor/git2-0.6.3/src/index.rs cargo-0.19.0/vendor/git2-0.6.3/src/index.rs --- cargo-0.17.0/vendor/git2-0.6.3/src/index.rs 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/git2-0.6.3/src/index.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,620 +0,0 @@ -use std::ffi::{CStr, OsString, CString}; -use std::ops::Range; -use std::path::Path; -use std::slice; - -use libc::{c_int, c_uint, size_t, c_void, c_char}; - -use {raw, panic, Repository, Error, Tree, Oid, IndexAddOption, IndexTime}; -use IntoCString; -use util::{self, Binding}; - -/// A structure to represent a git [index][1] -/// -/// [1]: http://git-scm.com/book/en/Git-Internals-Git-Objects -pub struct Index { - raw: *mut raw::git_index, -} - -/// An iterator over the entries in an index -pub struct IndexEntries<'index> { - range: Range, - index: &'index Index, -} - -/// A callback function to filter index matches. -/// -/// Used by `Index::{add_all,remove_all,update_all}`. The first argument is the -/// path, and the second is the patchspec that matched it. Return 0 to confirm -/// the operation on the item, > 0 to skip the item, and < 0 to abort the scan. -pub type IndexMatchedPath<'a> = FnMut(&Path, &[u8]) -> i32 + 'a; - -/// A structure to represent an entry or a file inside of an index. -/// -/// All fields of an entry are public for modification and inspection. This is -/// also how a new index entry is created. 
-#[allow(missing_docs)] -pub struct IndexEntry { - pub ctime: IndexTime, - pub mtime: IndexTime, - pub dev: u32, - pub ino: u32, - pub mode: u32, - pub uid: u32, - pub gid: u32, - pub file_size: u32, - pub id: Oid, - pub flags: u16, - pub flags_extended: u16, - pub path: Vec, -} - -impl Index { - /// Creates a new in-memory index. - /// - /// This index object cannot be read/written to the filesystem, but may be - /// used to perform in-memory index operations. - pub fn new() -> Result { - ::init(); - let mut raw = 0 as *mut raw::git_index; - unsafe { - try_call!(raw::git_index_new(&mut raw)); - Ok(Binding::from_raw(raw)) - } - } - - /// Create a new bare Git index object as a memory representation of the Git - /// index file in 'index_path', without a repository to back it. - /// - /// Since there is no ODB or working directory behind this index, any Index - /// methods which rely on these (e.g. add_path) will fail. - /// - /// If you need an index attached to a repository, use the `index()` method - /// on `Repository`. - pub fn open(index_path: &Path) -> Result { - ::init(); - let mut raw = 0 as *mut raw::git_index; - let index_path = try!(index_path.into_c_string()); - unsafe { - try_call!(raw::git_index_open(&mut raw, index_path)); - Ok(Binding::from_raw(raw)) - } - } - - /// Add or update an index entry from an in-memory struct - /// - /// If a previous index entry exists that has the same path and stage as the - /// given 'source_entry', it will be replaced. Otherwise, the 'source_entry' - /// will be added. - pub fn add(&mut self, entry: &IndexEntry) -> Result<(), Error> { - let path = try!(CString::new(&entry.path[..])); - - // libgit2 encodes the length of the path in the lower bits of the - // `flags` entry, so mask those out and recalculate here to ensure we - // don't corrupt anything. 
- let mut flags = entry.flags & !raw::GIT_IDXENTRY_NAMEMASK; - - if entry.path.len() < raw::GIT_IDXENTRY_NAMEMASK as usize { - flags |= entry.path.len() as u16; - } else { - flags |= raw::GIT_IDXENTRY_NAMEMASK; - } - - unsafe { - let raw = raw::git_index_entry { - dev: entry.dev, - ino: entry.ino, - mode: entry.mode, - uid: entry.uid, - gid: entry.gid, - file_size: entry.file_size, - id: *entry.id.raw(), - flags: flags, - flags_extended: entry.flags_extended, - path: path.as_ptr(), - mtime: raw::git_index_time { - seconds: entry.mtime.seconds(), - nanoseconds: entry.mtime.nanoseconds(), - }, - ctime: raw::git_index_time { - seconds: entry.ctime.seconds(), - nanoseconds: entry.ctime.nanoseconds(), - }, - }; - try_call!(raw::git_index_add(self.raw, &raw)); - Ok(()) - } - } - - /// Add or update an index entry from a file on disk - /// - /// The file path must be relative to the repository's working folder and - /// must be readable. - /// - /// This method will fail in bare index instances. - /// - /// This forces the file to be added to the index, not looking at gitignore - /// rules. - /// - /// If this file currently is the result of a merge conflict, this file will - /// no longer be marked as conflicting. The data about the conflict will be - /// moved to the "resolve undo" (REUC) section. - pub fn add_path(&mut self, path: &Path) -> Result<(), Error> { - // Git apparently expects '/' to be separators for paths - let mut posix_path = OsString::new(); - for (i, comp) in path.components().enumerate() { - if i != 0 { posix_path.push("/"); } - posix_path.push(comp.as_os_str()); - } - let posix_path = try!(posix_path.into_c_string()); - unsafe { - try_call!(raw::git_index_add_bypath(self.raw, posix_path)); - Ok(()) - } - } - - /// Add or update index entries matching files in the working directory. - /// - /// This method will fail in bare index instances. 
- /// - /// The `pathspecs` are a list of file names or shell glob patterns that - /// will matched against files in the repository's working directory. Each - /// file that matches will be added to the index (either updating an - /// existing entry or adding a new entry). You can disable glob expansion - /// and force exact matching with the `AddDisablePathspecMatch` flag. - /// - /// Files that are ignored will be skipped (unlike `add_path`). If a file is - /// already tracked in the index, then it will be updated even if it is - /// ignored. Pass the `AddForce` flag to skip the checking of ignore rules. - /// - /// To emulate `git add -A` and generate an error if the pathspec contains - /// the exact path of an ignored file (when not using `AddForce`), add the - /// `AddCheckPathspec` flag. This checks that each entry in `pathspecs` - /// that is an exact match to a filename on disk is either not ignored or - /// already in the index. If this check fails, the function will return - /// an error. - /// - /// To emulate `git add -A` with the "dry-run" option, just use a callback - /// function that always returns a positive value. See below for details. - /// - /// If any files are currently the result of a merge conflict, those files - /// will no longer be marked as conflicting. The data about the conflicts - /// will be moved to the "resolve undo" (REUC) section. - /// - /// If you provide a callback function, it will be invoked on each matching - /// item in the working directory immediately before it is added to / - /// updated in the index. Returning zero will add the item to the index, - /// greater than zero will skip the item, and less than zero will abort the - /// scan an return an error to the caller. 
- pub fn add_all(&mut self, - pathspecs: I, - flag: IndexAddOption, - mut cb: Option<&mut IndexMatchedPath>) - -> Result<(), Error> - where T: IntoCString, I: IntoIterator, - { - let (_a, _b, raw_strarray) = try!(::util::iter2cstrs(pathspecs)); - let ptr = cb.as_mut(); - let callback = ptr.as_ref().map(|_| { - index_matched_path_cb as raw::git_index_matched_path_cb - }); - unsafe { - try_call!(raw::git_index_add_all(self.raw, - &raw_strarray, - flag.bits() as c_uint, - callback, - ptr.map(|p| p as *mut _) - .unwrap_or(0 as *mut _) - as *mut c_void)); - } - return Ok(()); - } - - /// Clear the contents (all the entries) of an index object. - /// - /// This clears the index object in memory; changes must be explicitly - /// written to disk for them to take effect persistently via `write_*`. - pub fn clear(&mut self) -> Result<(), Error> { - unsafe { try_call!(raw::git_index_clear(self.raw)); } - Ok(()) - } - - /// Get the count of entries currently in the index - pub fn len(&self) -> usize { - unsafe { raw::git_index_entrycount(&*self.raw) as usize } - } - - /// Get one of the entries in the index by its position. - pub fn get(&self, n: usize) -> Option { - unsafe { - let ptr = raw::git_index_get_byindex(self.raw, n as size_t); - if ptr.is_null() {None} else {Some(Binding::from_raw(*ptr))} - } - } - - /// Get an iterator over the entries in this index. - pub fn iter(&self) -> IndexEntries { - IndexEntries { range: 0..self.len(), index: self } - } - - /// Get one of the entries in the index by its path. - pub fn get_path(&self, path: &Path, stage: i32) -> Option { - let path = path.into_c_string().unwrap(); - unsafe { - let ptr = call!(raw::git_index_get_bypath(self.raw, path, - stage as c_int)); - if ptr.is_null() {None} else {Some(Binding::from_raw(*ptr))} - } - } - - /// Get the full path to the index file on disk. - /// - /// Returns `None` if this is an in-memory index. 
- pub fn path(&self) -> Option<&Path> { - unsafe { - ::opt_bytes(self, raw::git_index_path(&*self.raw)).map(util::bytes2path) - } - } - - /// Update the contents of an existing index object in memory by reading - /// from the hard disk. - /// - /// If force is true, this performs a "hard" read that discards in-memory - /// changes and always reloads the on-disk index data. If there is no - /// on-disk version, the index will be cleared. - /// - /// If force is false, this does a "soft" read that reloads the index data - /// from disk only if it has changed since the last time it was loaded. - /// Purely in-memory index data will be untouched. Be aware: if there are - /// changes on disk, unwritten in-memory changes are discarded. - pub fn read(&mut self, force: bool) -> Result<(), Error> { - unsafe { try_call!(raw::git_index_read(self.raw, force)); } - Ok(()) - } - - /// Read a tree into the index file with stats - /// - /// The current index contents will be replaced by the specified tree. - pub fn read_tree(&mut self, tree: &Tree) -> Result<(), Error> { - unsafe { try_call!(raw::git_index_read_tree(self.raw, &*tree.raw())); } - Ok(()) - } - - /// Remove an entry from the index - pub fn remove(&mut self, path: &Path, stage: i32) -> Result<(), Error> { - let path = try!(path.into_c_string()); - unsafe { - try_call!(raw::git_index_remove(self.raw, path, stage as c_int)); - } - Ok(()) - } - - /// Remove an index entry corresponding to a file on disk. - /// - /// The file path must be relative to the repository's working folder. It - /// may exist. - /// - /// If this file currently is the result of a merge conflict, this file will - /// no longer be marked as conflicting. The data about the conflict will be - /// moved to the "resolve undo" (REUC) section. 
- pub fn remove_path(&mut self, path: &Path) -> Result<(), Error> { - let path = try!(path.into_c_string()); - unsafe { - try_call!(raw::git_index_remove_bypath(self.raw, path)); - } - Ok(()) - } - - /// Remove all entries from the index under a given directory. - pub fn remove_dir(&mut self, path: &Path, stage: i32) -> Result<(), Error> { - let path = try!(path.into_c_string()); - unsafe { - try_call!(raw::git_index_remove_directory(self.raw, path, - stage as c_int)); - } - Ok(()) - } - - /// Remove all matching index entries. - /// - /// If you provide a callback function, it will be invoked on each matching - /// item in the index immediately before it is removed. Return 0 to remove - /// the item, > 0 to skip the item, and < 0 to abort the scan. - pub fn remove_all(&mut self, - pathspecs: I, - mut cb: Option<&mut IndexMatchedPath>) - -> Result<(), Error> - where T: IntoCString, I: IntoIterator, - { - let (_a, _b, raw_strarray) = try!(::util::iter2cstrs(pathspecs)); - let ptr = cb.as_mut(); - let callback = ptr.as_ref().map(|_| { - index_matched_path_cb as raw::git_index_matched_path_cb - }); - unsafe { - try_call!(raw::git_index_remove_all(self.raw, - &raw_strarray, - callback, - ptr.map(|p| p as *mut _) - .unwrap_or(0 as *mut _) - as *mut c_void)); - } - return Ok(()); - } - - /// Update all index entries to match the working directory - /// - /// This method will fail in bare index instances. - /// - /// This scans the existing index entries and synchronizes them with the - /// working directory, deleting them if the corresponding working directory - /// file no longer exists otherwise updating the information (including - /// adding the latest version of file to the ODB if needed). - /// - /// If you provide a callback function, it will be invoked on each matching - /// item in the index immediately before it is updated (either refreshed or - /// removed depending on working directory state). 
Return 0 to proceed with - /// updating the item, > 0 to skip the item, and < 0 to abort the scan. - pub fn update_all(&mut self, - pathspecs: I, - mut cb: Option<&mut IndexMatchedPath>) - -> Result<(), Error> - where T: IntoCString, I: IntoIterator, - { - let (_a, _b, raw_strarray) = try!(::util::iter2cstrs(pathspecs)); - let ptr = cb.as_mut(); - let callback = ptr.as_ref().map(|_| { - index_matched_path_cb as raw::git_index_matched_path_cb - }); - unsafe { - try_call!(raw::git_index_update_all(self.raw, - &raw_strarray, - callback, - ptr.map(|p| p as *mut _) - .unwrap_or(0 as *mut _) - as *mut c_void)); - } - return Ok(()); - } - - /// Write an existing index object from memory back to disk using an atomic - /// file lock. - pub fn write(&mut self) -> Result<(), Error> { - unsafe { try_call!(raw::git_index_write(self.raw)); } - Ok(()) - } - - /// Write the index as a tree. - /// - /// This method will scan the index and write a representation of its - /// current state back to disk; it recursively creates tree objects for each - /// of the subtrees stored in the index, but only returns the OID of the - /// root tree. This is the OID that can be used e.g. to create a commit. - /// - /// The index instance cannot be bare, and needs to be associated to an - /// existing repository. - /// - /// The index must not contain any file in conflict. - pub fn write_tree(&mut self) -> Result { - let mut raw = raw::git_oid { id: [0; raw::GIT_OID_RAWSZ] }; - unsafe { - try_call!(raw::git_index_write_tree(&mut raw, self.raw)); - Ok(Binding::from_raw(&raw as *const _)) - } - } - - /// Write the index as a tree to the given repository - /// - /// This is the same as `write_tree` except that the destination repository - /// can be chosen. 
- pub fn write_tree_to(&mut self, repo: &Repository) -> Result { - let mut raw = raw::git_oid { id: [0; raw::GIT_OID_RAWSZ] }; - unsafe { - try_call!(raw::git_index_write_tree_to(&mut raw, self.raw, - repo.raw())); - Ok(Binding::from_raw(&raw as *const _)) - } - } -} - -impl Binding for Index { - type Raw = *mut raw::git_index; - unsafe fn from_raw(raw: *mut raw::git_index) -> Index { - Index { raw: raw } - } - fn raw(&self) -> *mut raw::git_index { self.raw } -} - -extern fn index_matched_path_cb(path: *const c_char, - matched_pathspec: *const c_char, - payload: *mut c_void) -> c_int { - unsafe { - let path = CStr::from_ptr(path).to_bytes(); - let matched_pathspec = CStr::from_ptr(matched_pathspec).to_bytes(); - - panic::wrap(|| { - let payload = payload as *mut &mut IndexMatchedPath; - (*payload)(util::bytes2path(path), matched_pathspec) as c_int - }).unwrap_or(-1) - } -} - -impl Drop for Index { - fn drop(&mut self) { - unsafe { raw::git_index_free(self.raw) } - } -} - -impl<'index> Iterator for IndexEntries<'index> { - type Item = IndexEntry; - fn next(&mut self) -> Option { - self.range.next().map(|i| self.index.get(i).unwrap()) - } -} - -impl Binding for IndexEntry { - type Raw = raw::git_index_entry; - - unsafe fn from_raw(raw: raw::git_index_entry) -> IndexEntry { - let raw::git_index_entry { - ctime, mtime, dev, ino, mode, uid, gid, file_size, id, flags, - flags_extended, path - } = raw; - - // libgit2 encodes the length of the path in the lower bits of `flags`, - // but if the length exceeds the number of bits then the path is - // nul-terminated. 
- let mut pathlen = (flags & raw::GIT_IDXENTRY_NAMEMASK) as usize; - if pathlen == raw::GIT_IDXENTRY_NAMEMASK as usize { - pathlen = CStr::from_ptr(path).to_bytes().len(); - } - - let path = slice::from_raw_parts(path as *const u8, pathlen); - - IndexEntry { - dev: dev, - ino: ino, - mode: mode, - uid: uid, - gid: gid, - file_size: file_size, - id: Binding::from_raw(&id as *const _), - flags: flags, - flags_extended: flags_extended, - path: path.to_vec(), - mtime: Binding::from_raw(mtime), - ctime: Binding::from_raw(ctime), - } - } - - fn raw(&self) -> raw::git_index_entry { - // not implemented, may require a CString in storage - panic!() - } -} - -#[cfg(test)] -mod tests { - use std::fs::{self, File}; - use std::path::Path; - use tempdir::TempDir; - - use {Index, IndexEntry, Repository, ResetType, Oid, IndexTime}; - - #[test] - fn smoke() { - let mut index = Index::new().unwrap(); - assert!(index.add_path(&Path::new(".")).is_err()); - index.clear().unwrap(); - assert_eq!(index.len(), 0); - assert!(index.get(0).is_none()); - assert!(index.path().is_none()); - assert!(index.read(true).is_err()); - } - - #[test] - fn smoke_from_repo() { - let (_td, repo) = ::test::repo_init(); - let mut index = repo.index().unwrap(); - assert_eq!(index.path().map(|s| s.to_path_buf()), - Some(repo.path().join("index"))); - Index::open(&repo.path().join("index")).unwrap(); - - index.clear().unwrap(); - index.read(true).unwrap(); - index.write().unwrap(); - index.write_tree().unwrap(); - index.write_tree_to(&repo).unwrap(); - } - - #[test] - fn add_all() { - let (_td, repo) = ::test::repo_init(); - let mut index = repo.index().unwrap(); - - let root = repo.path().parent().unwrap(); - fs::create_dir(&root.join("foo")).unwrap(); - File::create(&root.join("foo/bar")).unwrap(); - let mut called = false; - index.add_all(["foo"].iter(), ::ADD_DEFAULT, - Some(&mut |a: &Path, b: &[u8]| { - assert!(!called); - called = true; - assert_eq!(b, b"foo"); - assert_eq!(a, Path::new("foo/bar")); - 0 - 
})).unwrap(); - assert!(called); - - called = false; - index.remove_all(["."].iter(), Some(&mut |a: &Path, b: &[u8]| { - assert!(!called); - called = true; - assert_eq!(b, b"."); - assert_eq!(a, Path::new("foo/bar")); - 0 - })).unwrap(); - assert!(called); - } - - #[test] - fn smoke_add() { - let (_td, repo) = ::test::repo_init(); - let mut index = repo.index().unwrap(); - - let root = repo.path().parent().unwrap(); - fs::create_dir(&root.join("foo")).unwrap(); - File::create(&root.join("foo/bar")).unwrap(); - index.add_path(Path::new("foo/bar")).unwrap(); - index.write().unwrap(); - assert_eq!(index.iter().count(), 1); - - // Make sure we can use this repo somewhere else now. - let id = index.write_tree().unwrap(); - let tree = repo.find_tree(id).unwrap(); - let sig = repo.signature().unwrap(); - let id = repo.refname_to_id("HEAD").unwrap(); - let parent = repo.find_commit(id).unwrap(); - let commit = repo.commit(Some("HEAD"), &sig, &sig, "commit", - &tree, &[&parent]).unwrap(); - let obj = repo.find_object(commit, None).unwrap(); - repo.reset(&obj, ResetType::Hard, None).unwrap(); - - let td2 = TempDir::new("git").unwrap(); - let url = ::test::path2url(&root); - let repo = Repository::clone(&url, td2.path()).unwrap(); - let obj = repo.find_object(commit, None).unwrap(); - repo.reset(&obj, ResetType::Hard, None).unwrap(); - } - - #[test] - fn add_then_read() { - let mut index = Index::new().unwrap(); - assert!(index.add(&entry()).is_err()); - - let mut index = Index::new().unwrap(); - let mut e = entry(); - e.path = b"foobar".to_vec(); - index.add(&e).unwrap(); - let e = index.get(0).unwrap(); - assert_eq!(e.path.len(), 6); - } - - fn entry() -> IndexEntry { - IndexEntry { - ctime: IndexTime::new(0, 0), - mtime: IndexTime::new(0, 0), - dev: 0, - ino: 0, - mode: 0o100644, - uid: 0, - gid: 0, - file_size: 0, - id: Oid::from_bytes(&[0; 20]).unwrap(), - flags: 0, - flags_extended: 0, - path: Vec::new(), - } - } -} - diff -Nru cargo-0.17.0/vendor/git2-0.6.3/src/lib.rs 
cargo-0.19.0/vendor/git2-0.6.3/src/lib.rs --- cargo-0.17.0/vendor/git2-0.6.3/src/lib.rs 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/git2-0.6.3/src/lib.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,1040 +0,0 @@ -//! # libgit2 bindings for Rust -//! -//! This library contains bindings to the [libgit2][1] C library which is used -//! to manage git repositories. The library itself is a work in progress and is -//! likely lacking some bindings here and there, so be warned. -//! -//! [1]: https://libgit2.github.com/ -//! -//! The git2-rs library strives to be as close to libgit2 as possible, but also -//! strives to make using libgit2 as safe as possible. All resource management -//! is automatic as well as adding strong types to all interfaces (including -//! `Result`) -//! -//! ## Creating a `Repository` -//! -//! The `Repository` is the source from which almost all other objects in git-rs -//! are spawned. A repository can be created through opening, initializing, or -//! cloning. -//! -//! ### Initializing a new repository -//! -//! The `init` method will create a new repository, assuming one does not -//! already exist. -//! -//! ```no_run -//! # #![allow(unstable)] -//! use git2::Repository; -//! -//! let repo = match Repository::init("/path/to/a/repo") { -//! Ok(repo) => repo, -//! Err(e) => panic!("failed to init: {}", e), -//! }; -//! ``` -//! -//! ### Opening an existing repository -//! -//! ```no_run -//! # #![allow(unstable)] -//! use git2::Repository; -//! -//! let repo = match Repository::open("/path/to/a/repo") { -//! Ok(repo) => repo, -//! Err(e) => panic!("failed to open: {}", e), -//! }; -//! ``` -//! -//! ### Cloning an existing repository -//! -//! ```no_run -//! # #![allow(unstable)] -//! use git2::Repository; -//! -//! let url = "https://github.com/alexcrichton/git2-rs"; -//! let repo = match Repository::clone(url, "/path/to/a/repo") { -//! Ok(repo) => repo, -//! Err(e) => panic!("failed to clone: {}", e), -//! }; -//! ``` -//! -//! 
## Working with a `Repository` -//! -//! All deriviative objects, references, etc are attached to the lifetime of the -//! source `Repository`, to ensure that they do not outlive the repository -//! itself. - -#![doc(html_root_url = "http://alexcrichton.com/git2-rs")] -#![allow(trivial_numeric_casts, trivial_casts)] -#![deny(missing_docs)] -#![cfg_attr(test, deny(warnings))] - -extern crate libc; -extern crate url; -extern crate libgit2_sys as raw; -#[macro_use] extern crate bitflags; -#[cfg(test)] extern crate tempdir; - -use std::ffi::{CStr, CString}; -use std::fmt; -use std::str; -use std::sync::{Once, ONCE_INIT}; - -pub use blame::{Blame, BlameHunk, BlameIter, BlameOptions}; -pub use blob::Blob; -pub use branch::{Branch, Branches}; -pub use buf::Buf; -pub use commit::{Commit, Parents}; -pub use config::{Config, ConfigEntry, ConfigEntries}; -pub use cred::{Cred, CredentialHelper}; -pub use describe::{Describe, DescribeFormatOptions, DescribeOptions}; -pub use diff::{Diff, DiffDelta, DiffFile, DiffOptions, Deltas}; -pub use diff::{DiffBinary, DiffBinaryFile, DiffBinaryKind}; -pub use diff::{DiffLine, DiffHunk, DiffStats, DiffFindOptions}; -pub use error::Error; -pub use index::{Index, IndexEntry, IndexEntries, IndexMatchedPath}; -pub use merge::{AnnotatedCommit, MergeOptions}; -pub use message::{message_prettify, DEFAULT_COMMENT_CHAR}; -pub use note::{Note, Notes}; -pub use object::Object; -pub use oid::Oid; -pub use packbuilder::{PackBuilder, PackBuilderStage}; -pub use pathspec::{Pathspec, PathspecMatchList, PathspecFailedEntries}; -pub use pathspec::{PathspecDiffEntries, PathspecEntries}; -pub use patch::Patch; -pub use proxy_options::ProxyOptions; -pub use reference::{Reference, References, ReferenceNames}; -pub use reflog::{Reflog, ReflogEntry, ReflogIter}; -pub use refspec::Refspec; -pub use remote::{Remote, Refspecs, RemoteHead, FetchOptions, PushOptions}; -pub use remote_callbacks::{RemoteCallbacks, Credentials, TransferProgress}; -pub use 
remote_callbacks::{TransportMessage, Progress, UpdateTips}; -pub use repo::{Repository, RepositoryInitOptions}; -pub use revspec::Revspec; -pub use revwalk::Revwalk; -pub use signature::Signature; -pub use status::{StatusOptions, Statuses, StatusIter, StatusEntry, StatusShow}; -pub use submodule::Submodule; -pub use tag::Tag; -pub use time::{Time, IndexTime}; -pub use tree::{Tree, TreeEntry, TreeIter}; -pub use treebuilder::TreeBuilder; -pub use util::IntoCString; - -/// An enumeration of possible errors that can happen when working with a git -/// repository. -#[derive(PartialEq, Eq, Clone, Debug, Copy)] -pub enum ErrorCode { - /// Generic error - GenericError, - /// Requested object could not be found - NotFound, - /// Object exists preventing operation - Exists, - /// More than one object matches - Ambiguous, - /// Output buffer too short to hold data - BufSize, - /// User-generated error - User, - /// Operation not allowed on bare repository - BareRepo, - /// HEAD refers to branch with no commits - UnbornBranch, - /// Merge in progress prevented operation - Unmerged, - /// Reference was not fast-forwardable - NotFastForward, - /// Name/ref spec was not in a valid format - InvalidSpec, - /// Checkout conflicts prevented operation - Conflict, - /// Lock file prevented operation - Locked, - /// Reference value does not match expected - Modified, - /// Authentication error - Auth, - /// Server certificate is invalid - Certificate, - /// Patch/merge has already been applied - Applied, - /// The requested peel operation is not possible - Peel, - /// Unexpected EOF - Eof, - /// Invalid operation or input - Invalid, - /// Uncommitted changes in index prevented operation - Uncommitted, - /// Operation was not valid for a directory, - Directory, -} - -/// An enumeration of possible categories of things that can have -/// errors when working with a git repository. 
-#[derive(PartialEq, Eq, Clone, Debug, Copy)] -pub enum ErrorClass { - /// Uncategorized - None, - /// Out of memory or insufficient allocated space - NoMemory, - /// Syscall or standard system library error - Os, - /// Invalid input - Invalid, - /// Error resolving or manipulating a reference - Reference, - /// ZLib failure - Zlib, - /// Bad repository state - Repository, - /// Bad configuration - Config, - /// Regex failure - Regex, - /// Bad object - Odb, - /// Invalid index data - Index, - /// Error creating or obtaining an object - Object, - /// Network error - Net, - /// Error manpulating a tag - Tag, - /// Invalid value in tree - Tree, - /// Hashing or packing error - Indexer, - /// Error from SSL - Ssl, - /// Error involing submodules - Submodule, - /// Threading error - Thread, - /// Error manipulating a stash - Stash, - /// Checkout failure - Checkout, - /// Invalid FETCH_HEAD - FetchHead, - /// Merge failure - Merge, - /// SSH failure - Ssh, - /// Error manipulating filters - Filter, - /// Error reverting commit - Revert, - /// Error from a user callback - Callback, - /// Error cherry-picking commit - CherryPick, - /// Can't describe object - Describe, - /// Error during rebase - Rebase, - /// Filesystem-related error - Filesystem, -} - -/// A listing of the possible states that a repository can be in. -#[derive(PartialEq, Eq, Clone, Debug, Copy)] -#[allow(missing_docs)] -pub enum RepositoryState { - Clean, - Merge, - Revert, - RevertSequence, - CherryPick, - CherryPickSequence, - Bisect, - Rebase, - RebaseInteractive, - RebaseMerge, - ApplyMailbox, - ApplyMailboxOrRebase, -} - -/// An enumeration of the possible directions for a remote. -#[derive(Copy, Clone)] -pub enum Direction { - /// Data will be fetched (read) from this remote. - Fetch, - /// Data will be pushed (written) to this remote. - Push, -} - -/// An enumeration of the operations that can be performed for the `reset` -/// method on a `Repository`. 
-#[derive(Copy, Clone)] -pub enum ResetType { - /// Move the head to the given commit. - Soft, - /// Soft plus reset the index to the commit. - Mixed, - /// Mixed plus changes in the working tree are discarded. - Hard, -} - -/// An enumeration all possible kinds objects may have. -#[derive(PartialEq, Eq, Copy, Clone, Debug)] -pub enum ObjectType { - /// Any kind of git object - Any, - /// An object which corresponds to a git commit - Commit, - /// An object which corresponds to a git tree - Tree, - /// An object which corresponds to a git blob - Blob, - /// An object which corresponds to a git tag - Tag, -} - -/// An enumeration for the possible types of branches -#[derive(PartialEq, Eq, Debug, Copy, Clone)] -pub enum BranchType { - /// A local branch not on a remote. - Local, - /// A branch for a remote. - Remote, -} - -/// An enumeration of the possible priority levels of a config file. -/// -/// The levels corresponding to the escalation logic (higher to lower) when -/// searching for config entries. -#[derive(PartialEq, Eq, Debug, Copy, Clone)] -pub enum ConfigLevel { - /// System-wide on Windows, for compatibility with portable git - ProgramData, - /// System-wide configuration file, e.g. /etc/gitconfig - System, - /// XDG-compatible configuration file, e.g. ~/.config/git/config - XDG, - /// User-specific configuration, e.g. ~/.gitconfig - Global, - /// Repository specific config, e.g. $PWD/.git/config - Local, - /// Application specific configuration file - App, - /// Highest level available - Highest, -} - -/// Merge file favor options for `MergeOptions` instruct the file-level -/// merging functionality how to deal with conflicting regions of the files. -#[derive(PartialEq, Eq, Debug, Copy, Clone)] -pub enum FileFavor { - /// When a region of a file is changed in both branches, a conflict will be - /// recorded in the index so that git_checkout can produce a merge file with - /// conflict markers in the working directory. This is the default. 
- Normal, - /// When a region of a file is changed in both branches, the file created - /// in the index will contain the "ours" side of any conflicting region. - /// The index will not record a conflict. - Ours, - /// When a region of a file is changed in both branches, the file created - /// in the index will contain the "theirs" side of any conflicting region. - /// The index will not record a conflict. - Theirs, - /// When a region of a file is changed in both branches, the file created - /// in the index will contain each unique line from each side, which has - /// the result of combining both files. The index will not record a conflict. - Union, -} - -bitflags! { - /// Orderings that may be specified for Revwalk iteration. - pub flags Sort: u32 { - /// Sort the repository contents in no particular ordering. - /// - /// This sorting is arbitrary, implementation-specific, and subject to - /// change at any time. This is the default sorting for new walkers. - const SORT_NONE = raw::GIT_SORT_NONE as u32, - - /// Sort the repository contents in topological order (parents before - /// children). - /// - /// This sorting mode can be combined with time sorting. - const SORT_TOPOLOGICAL = raw::GIT_SORT_TOPOLOGICAL as u32, - - /// Sort the repository contents by commit time. - /// - /// This sorting mode can be combined with topological sorting. - const SORT_TIME = raw::GIT_SORT_TIME as u32, - - /// Iterate through the repository contents in reverse order. - /// - /// This sorting mode can be combined with any others. - const SORT_REVERSE = raw::GIT_SORT_REVERSE as u32, - } -} - -bitflags! { - /// Types of credentials that can be requested by a credential callback. 
- pub flags CredentialType: u32 { - #[allow(missing_docs)] - const USER_PASS_PLAINTEXT = raw::GIT_CREDTYPE_USERPASS_PLAINTEXT as u32, - #[allow(missing_docs)] - const SSH_KEY = raw::GIT_CREDTYPE_SSH_KEY as u32, - #[allow(missing_docs)] - const SSH_MEMORY = raw::GIT_CREDTYPE_SSH_MEMORY as u32, - #[allow(missing_docs)] - const SSH_CUSTOM = raw::GIT_CREDTYPE_SSH_CUSTOM as u32, - #[allow(missing_docs)] - const DEFAULT = raw::GIT_CREDTYPE_DEFAULT as u32, - #[allow(missing_docs)] - const SSH_INTERACTIVE = raw::GIT_CREDTYPE_SSH_INTERACTIVE as u32, - #[allow(missing_docs)] - const USERNAME = raw::GIT_CREDTYPE_USERNAME as u32, - } -} - -bitflags! { - /// Flags for the `flags` field of an IndexEntry. - pub flags IndexEntryFlag: u16 { - /// Set when the `extended_flags` field is valid. - const IDXENTRY_EXTENDED = raw::GIT_IDXENTRY_EXTENDED as u16, - /// "Assume valid" flag - const IDXENTRY_VALID = raw::GIT_IDXENTRY_VALID as u16, - } -} - -bitflags! { - /// Flags for the `extended_flags` field of an IndexEntry. 
- pub flags IndexEntryExtendedFlag: u16 { - /// An "intent to add" entry from "git add -N" - const IDXENTRY_INTENT_TO_ADD = raw::GIT_IDXENTRY_INTENT_TO_ADD as u16, - /// Skip the associated worktree file, for sparse checkouts - const IDXENTRY_SKIP_WORKTREE = raw::GIT_IDXENTRY_SKIP_WORKTREE as u16, - /// Reserved for a future on-disk extended flag - const IDXENTRY_EXTENDED2 = raw::GIT_IDXENTRY_EXTENDED2 as u16, - - #[allow(missing_docs)] - const IDXENTRY_UPDATE = raw::GIT_IDXENTRY_UPDATE as u16, - #[allow(missing_docs)] - const IDXENTRY_REMOVE = raw::GIT_IDXENTRY_REMOVE as u16, - #[allow(missing_docs)] - const IDXENTRY_UPTODATE = raw::GIT_IDXENTRY_UPTODATE as u16, - #[allow(missing_docs)] - const IDXENTRY_ADDED = raw::GIT_IDXENTRY_ADDED as u16, - - #[allow(missing_docs)] - const IDXENTRY_HASHED = raw::GIT_IDXENTRY_HASHED as u16, - #[allow(missing_docs)] - const IDXENTRY_UNHASHED = raw::GIT_IDXENTRY_UNHASHED as u16, - #[allow(missing_docs)] - const IDXENTRY_WT_REMOVE = raw::GIT_IDXENTRY_WT_REMOVE as u16, - #[allow(missing_docs)] - const IDXENTRY_CONFLICTED = raw::GIT_IDXENTRY_CONFLICTED as u16, - - #[allow(missing_docs)] - const IDXENTRY_UNPACKED = raw::GIT_IDXENTRY_UNPACKED as u16, - #[allow(missing_docs)] - const IDXENTRY_NEW_SKIP_WORKTREE = raw::GIT_IDXENTRY_NEW_SKIP_WORKTREE as u16, - } -} - -bitflags! { - /// Flags for APIs that add files matching pathspec - pub flags IndexAddOption: u32 { - #[allow(missing_docs)] - const ADD_DEFAULT = raw::GIT_INDEX_ADD_DEFAULT as u32, - #[allow(missing_docs)] - const ADD_FORCE = raw::GIT_INDEX_ADD_FORCE as u32, - #[allow(missing_docs)] - const ADD_DISABLE_PATHSPEC_MATCH = - raw::GIT_INDEX_ADD_DISABLE_PATHSPEC_MATCH as u32, - #[allow(missing_docs)] - const ADD_CHECK_PATHSPEC = raw::GIT_INDEX_ADD_CHECK_PATHSPEC as u32, - } -} - -bitflags! { - /// Flags for `Repository::open_ext` - pub flags RepositoryOpenFlags: u32 { - /// Only open the specified path; don't walk upward searching. 
- const REPOSITORY_OPEN_NO_SEARCH = raw::GIT_REPOSITORY_OPEN_NO_SEARCH as u32, - /// Search across filesystem boundaries. - const REPOSITORY_OPEN_CROSS_FS = raw::GIT_REPOSITORY_OPEN_CROSS_FS as u32, - /// Force opening as bare repository, and defer loading its config. - const REPOSITORY_OPEN_BARE = raw::GIT_REPOSITORY_OPEN_BARE as u32, - /// Don't try appending `/.git` to the specified repository path. - const REPOSITORY_OPEN_NO_DOTGIT = raw::GIT_REPOSITORY_OPEN_NO_DOTGIT as u32, - /// Respect environment variables like `$GIT_DIR`. - const REPOSITORY_OPEN_FROM_ENV = raw::GIT_REPOSITORY_OPEN_FROM_ENV as u32, - } -} - -bitflags! { - /// Flags for the return value of `Repository::revparse` - pub flags RevparseMode: u32 { - /// The spec targeted a single object - const REVPARSE_SINGLE = raw::GIT_REVPARSE_SINGLE as u32, - /// The spec targeted a range of commits - const REVPARSE_RANGE = raw::GIT_REVPARSE_RANGE as u32, - /// The spec used the `...` operator, which invokes special semantics. - const REVPARSE_MERGE_BASE = raw::GIT_REVPARSE_MERGE_BASE as u32, - } -} - -#[cfg(test)] #[macro_use] mod test; -#[macro_use] mod panic; -mod call; -mod util; - -pub mod build; -pub mod cert; -pub mod string_array; -pub mod oid_array; -pub mod transport; - -mod blame; -mod blob; -mod branch; -mod buf; -mod commit; -mod config; -mod cred; -mod describe; -mod diff; -mod error; -mod index; -mod merge; -mod message; -mod note; -mod object; -mod oid; -mod packbuilder; -mod pathspec; -mod patch; -mod proxy_options; -mod reference; -mod reflog; -mod refspec; -mod remote; -mod remote_callbacks; -mod repo; -mod revspec; -mod revwalk; -mod signature; -mod status; -mod submodule; -mod tag; -mod time; -mod tree; -mod treebuilder; - -fn init() { - static INIT: Once = ONCE_INIT; - - INIT.call_once(|| { - openssl_env_init(); - }); - - raw::init(); -} - -#[cfg(all(unix, not(target_os = "macos"), not(target_os = "ios"), feature = "https"))] -fn openssl_env_init() { - extern crate openssl_probe; - - 
// Currently, libgit2 leverages OpenSSL for SSL support when cloning - // repositories over HTTPS. This means that we're picking up an OpenSSL - // dependency on non-Windows platforms (where it has its own HTTPS - // subsystem). As a result, we need to link to OpenSSL. - // - // Now actually *linking* to OpenSSL isn't so hard. We just need to make - // sure to use pkg-config to discover any relevant system dependencies for - // differences between distributions like CentOS and Ubuntu. The actual - // trickiness comes about when we start *distributing* the resulting - // binaries. Currently Cargo is distributed in binary form as nightlies, - // which means we're distributing a binary with OpenSSL linked in. - // - // For historical reasons, the Linux nightly builder is running a CentOS - // distribution in order to have as much ABI compatibility with other - // distributions as possible. Sadly, however, this compatibility does not - // extend to OpenSSL. Currently OpenSSL has two major versions, 0.9 and 1.0, - // which are incompatible (many ABI differences). The CentOS builder we - // build on has version 1.0, as do most distributions today. Some still have - // 0.9, however. This means that if we are to distribute the binaries built - // by the CentOS machine, we would only be compatible with OpenSSL 1.0 and - // we would fail to run (a dynamic linker error at runtime) on systems with - // only 9.8 installed (hopefully). - // - // But wait, the plot thickens! Apparently CentOS has dubbed their OpenSSL - // library as `libssl.so.10`, notably the `10` is included at the end. On - // the other hand Ubuntu, for example, only distributes `libssl.so`. This - // means that the binaries created at CentOS are hard-wired to probe for a - // file called `libssl.so.10` at runtime (using the LD_LIBRARY_PATH), which - // will not be found on ubuntu. The conclusion of this is that binaries - // built on CentOS cannot be distributed to Ubuntu and run successfully. 
- // - // There are a number of sneaky things we could do, including, but not - // limited to: - // - // 1. Create a shim program which runs "just before" cargo runs. The - // responsibility of this shim program would be to locate `libssl.so`, - // whatever it's called, on the current system, make sure there's a - // symlink *somewhere* called `libssl.so.10`, and then set up - // LD_LIBRARY_PATH and run the actual cargo. - // - // This approach definitely seems unconventional, and is borderline - // overkill for this problem. It's also dubious if we can find a - // libssl.so reliably on the target system. - // - // 2. Somehow re-work the CentOS installation so that the linked-against - // library is called libssl.so instead of libssl.so.10 - // - // The problem with this approach is that systems with 0.9 installed will - // start to silently fail, due to also having libraries called libssl.so - // (probably symlinked under a more appropriate version). - // - // 3. Compile Cargo against both OpenSSL 1.0 *and* OpenSSL 0.9, and - // distribute both. Also make sure that the linked-against name of the - // library is `libssl.so`. At runtime we determine which version is - // installed, and we then the appropriate binary. - // - // This approach clearly has drawbacks in terms of infrastructure and - // feasibility. - // - // 4. Build a nightly of Cargo for each distribution we'd like to support. - // You would then pick the appropriate Cargo nightly to install locally. - // - // So, with all this in mind, the decision was made to *statically* link - // OpenSSL. This solves any problem of relying on a downstream OpenSSL - // version being available. This does, however, open a can of worms related - // to security issues. It's generally a good idea to dynamically link - // OpenSSL as you'll get security updates over time without having to do - // anything (the system administrator will update the local openssl - // package). 
By statically linking, we're forfeiting this feature. - // - // The conclusion was made it is likely appropriate for the Cargo nightlies - // to statically link OpenSSL, but highly encourage distributions and - // packagers of Cargo to dynamically link OpenSSL. Packagers are targeting - // one system and are distributing to only that system, so none of the - // problems mentioned above would arise. - // - // In order to support this, a new package was made: openssl-static-sys. - // This package currently performs a fairly simple task: - // - // 1. Run pkg-config to discover where openssl is installed. - // 2. If openssl is installed in a nonstandard location, *and* static copies - // of the libraries are available, copy them to $OUT_DIR. - // - // This library will bring in libssl.a and libcrypto.a into the local build, - // allowing them to be picked up by this crate. This allows us to configure - // our own buildbots to have pkg-config point to these local pre-built - // copies of a static OpenSSL (with very few dependencies) while allowing - // most other builds of Cargo to naturally dynamically link OpenSSL. - // - // So in summary, if you're with me so far, we've statically linked OpenSSL - // to the Cargo binary (or any binary, for that matter) and we're ready to - // distribute it to *all* linux distributions. Remember that our original - // intent for openssl was for HTTPS support, which implies that we need some - // for of CA certificate store to validate certificates. This is normally - // installed in a standard system location. - // - // Unfortunately, as one might imagine, OpenSSL is configured for where this - // standard location is at *build time*, but it often varies widely - // per-system. Consequently, it was discovered that OpenSSL will respect the - // SSL_CERT_FILE and SSL_CERT_DIR environment variables in order to assist - // in discovering the location of this file (hurray!). 
- // - // So, finally getting to the point, this function solely exists to support - // our static builds of OpenSSL by probing for the "standard system - // location" of certificates and setting relevant environment variable to - // point to them. - // - // Ah, and as a final note, this is only a problem on Linux, not on OS X. On - // OS X the OpenSSL binaries are stable enough that we can just rely on - // dynamic linkage (plus they have some weird modifications to OpenSSL which - // means we wouldn't want to link statically). - openssl_probe::init_ssl_cert_env_vars(); -} - -#[cfg(any(windows, target_os = "macos", target_os = "ios", not(feature = "https")))] -fn openssl_env_init() {} - -unsafe fn opt_bytes<'a, T>(_anchor: &'a T, - c: *const libc::c_char) -> Option<&'a [u8]> { - if c.is_null() { - None - } else { - Some(CStr::from_ptr(c).to_bytes()) - } -} - -fn opt_cstr(o: Option) -> Result, Error> { - match o { - Some(s) => s.into_c_string().map(Some), - None => Ok(None) - } -} - -impl ObjectType { - /// Convert an object type to its string representation. - pub fn str(&self) -> &'static str { - unsafe { - let ptr = call!(raw::git_object_type2string(*self)) as *const _; - let data = CStr::from_ptr(ptr).to_bytes(); - str::from_utf8(data).unwrap() - } - } - - /// Determine if the given git_otype is a valid loose object type. 
- pub fn is_loose(&self) -> bool { - unsafe { (call!(raw::git_object_typeisloose(*self)) == 1) } - } - - /// Convert a raw git_otype to an ObjectType - pub fn from_raw(raw: raw::git_otype) -> Option { - match raw { - raw::GIT_OBJ_ANY => Some(ObjectType::Any), - raw::GIT_OBJ_COMMIT => Some(ObjectType::Commit), - raw::GIT_OBJ_TREE => Some(ObjectType::Tree), - raw::GIT_OBJ_BLOB => Some(ObjectType::Blob), - raw::GIT_OBJ_TAG => Some(ObjectType::Tag), - _ => None, - } - } - - /// Convert this kind into its raw representation - pub fn raw(&self) -> raw::git_otype { - call::convert(self) - } - - /// Convert a string object type representation to its object type. - pub fn from_str(s: &str) -> Option { - let raw = unsafe { call!(raw::git_object_string2type(CString::new(s).unwrap())) }; - ObjectType::from_raw(raw) - } -} - -impl fmt::Display for ObjectType { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - self.str().fmt(f) - } -} - -impl ConfigLevel { - /// Converts a raw configuration level to a ConfigLevel - pub fn from_raw(raw: raw::git_config_level_t) -> ConfigLevel { - match raw { - raw::GIT_CONFIG_LEVEL_PROGRAMDATA => ConfigLevel::ProgramData, - raw::GIT_CONFIG_LEVEL_SYSTEM => ConfigLevel::System, - raw::GIT_CONFIG_LEVEL_XDG => ConfigLevel::XDG, - raw::GIT_CONFIG_LEVEL_GLOBAL => ConfigLevel::Global, - raw::GIT_CONFIG_LEVEL_LOCAL => ConfigLevel::Local, - raw::GIT_CONFIG_LEVEL_APP => ConfigLevel::App, - raw::GIT_CONFIG_HIGHEST_LEVEL => ConfigLevel::Highest, - n => panic!("unknown config level: {}", n), - } - } -} - -bitflags! { - /// Status flags for a single file - /// - /// A combination of these values will be returned to indicate the status of - /// a file. Status compares the working directory, the index, and the - /// current HEAD of the repository. 
The `STATUS_INDEX_*` set of flags - /// represents the status of file in the index relative to the HEAD, and the - /// `STATUS_WT_*` set of flags represent the status of the file in the - /// working directory relative to the index. - pub flags Status: u32 { - #[allow(missing_docs)] - const STATUS_CURRENT = raw::GIT_STATUS_CURRENT as u32, - - #[allow(missing_docs)] - const STATUS_INDEX_NEW = raw::GIT_STATUS_INDEX_NEW as u32, - #[allow(missing_docs)] - const STATUS_INDEX_MODIFIED = raw::GIT_STATUS_INDEX_MODIFIED as u32, - #[allow(missing_docs)] - const STATUS_INDEX_DELETED = raw::GIT_STATUS_INDEX_DELETED as u32, - #[allow(missing_docs)] - const STATUS_INDEX_RENAMED = raw::GIT_STATUS_INDEX_RENAMED as u32, - #[allow(missing_docs)] - const STATUS_INDEX_TYPECHANGE = raw::GIT_STATUS_INDEX_TYPECHANGE as u32, - - #[allow(missing_docs)] - const STATUS_WT_NEW = raw::GIT_STATUS_WT_NEW as u32, - #[allow(missing_docs)] - const STATUS_WT_MODIFIED = raw::GIT_STATUS_WT_MODIFIED as u32, - #[allow(missing_docs)] - const STATUS_WT_DELETED = raw::GIT_STATUS_WT_DELETED as u32, - #[allow(missing_docs)] - const STATUS_WT_TYPECHANGE = raw::GIT_STATUS_WT_TYPECHANGE as u32, - #[allow(missing_docs)] - const STATUS_WT_RENAMED = raw::GIT_STATUS_WT_RENAMED as u32, - - #[allow(missing_docs)] - const STATUS_IGNORED = raw::GIT_STATUS_IGNORED as u32, - #[allow(missing_docs)] - const STATUS_CONFLICTED = raw::GIT_STATUS_CONFLICTED as u32, - } -} - -bitflags! { - /// Mode options for RepositoryInitOptions - pub flags RepositoryInitMode: u32 { - /// Use permissions configured by umask - the default - const REPOSITORY_INIT_SHARED_UMASK = - raw::GIT_REPOSITORY_INIT_SHARED_UMASK as u32, - /// Use `--shared=group` behavior, chmod'ing the new repo to be - /// group writable and \"g+sx\" for sticky group assignment - const REPOSITORY_INIT_SHARED_GROUP = - raw::GIT_REPOSITORY_INIT_SHARED_GROUP as u32, - /// Use `--shared=all` behavior, adding world readability. 
- const REPOSITORY_INIT_SHARED_ALL = - raw::GIT_REPOSITORY_INIT_SHARED_ALL as u32, - } -} - -/// What type of change is described by a `DiffDelta`? -#[derive(Copy, Clone, Debug)] -pub enum Delta { - /// No changes - Unmodified, - /// Entry does not exist in old version - Added, - /// Entry does not exist in new version - Deleted, - /// Entry content changed between old and new - Modified, - /// Entry was renamed wbetween old and new - Renamed, - /// Entry was copied from another old entry - Copied, - /// Entry is ignored item in workdir - Ignored, - /// Entry is untracked item in workdir - Untracked, - /// Type of entry changed between old and new - Typechange, - /// Entry is unreadable - Unreadable, - /// Entry in the index is conflicted - Conflicted, -} - -bitflags! { - /// Return codes for submodule status. - /// - /// A combination of these flags will be returned to describe the status of a - /// submodule. Depending on the "ignore" property of the submodule, some of - /// the flags may never be returned because they indicate changes that are - /// supposed to be ignored. - /// - /// Submodule info is contained in 4 places: the HEAD tree, the index, config - /// files (both .git/config and .gitmodules), and the working directory. Any - /// or all of those places might be missing information about the submodule - /// depending on what state the repo is in. We consider all four places to - /// build the combination of status flags. - /// - /// There are four values that are not really status, but give basic info - /// about what sources of submodule data are available. These will be - /// returned even if ignore is set to "ALL". - /// - /// * IN_HEAD - superproject head contains submodule - /// * IN_INDEX - superproject index contains submodule - /// * IN_CONFIG - superproject gitmodules has submodule - /// * IN_WD - superproject workdir has submodule - /// - /// The following values will be returned so long as ignore is not "ALL". 
- /// - /// * INDEX_ADDED - in index, not in head - /// * INDEX_DELETED - in head, not in index - /// * INDEX_MODIFIED - index and head don't match - /// * WD_UNINITIALIZED - workdir contains empty directory - /// * WD_ADDED - in workdir, not index - /// * WD_DELETED - in index, not workdir - /// * WD_MODIFIED - index and workdir head don't match - /// - /// The following can only be returned if ignore is "NONE" or "UNTRACKED". - /// - /// * WD_INDEX_MODIFIED - submodule workdir index is dirty - /// * WD_WD_MODIFIED - submodule workdir has modified files - /// - /// Lastly, the following will only be returned for ignore "NONE". - /// - /// * WD_UNTRACKED - wd contains untracked files - pub flags SubmoduleStatus: u32 { - #[allow(missing_docs)] - const SUBMODULE_STATUS_IN_HEAD = - raw::GIT_SUBMODULE_STATUS_IN_HEAD as u32, - #[allow(missing_docs)] - const SUBMODULE_STATUS_IN_INDEX = - raw::GIT_SUBMODULE_STATUS_IN_INDEX as u32, - #[allow(missing_docs)] - const SUBMODULE_STATUS_IN_CONFIG = - raw::GIT_SUBMODULE_STATUS_IN_CONFIG as u32, - #[allow(missing_docs)] - const SUBMODULE_STATUS_IN_WD = - raw::GIT_SUBMODULE_STATUS_IN_WD as u32, - #[allow(missing_docs)] - const SUBMODULE_STATUS_INDEX_ADDED = - raw::GIT_SUBMODULE_STATUS_INDEX_ADDED as u32, - #[allow(missing_docs)] - const SUBMODULE_STATUS_INDEX_DELETED = - raw::GIT_SUBMODULE_STATUS_INDEX_DELETED as u32, - #[allow(missing_docs)] - const SUBMODULE_STATUS_INDEX_MODIFIED = - raw::GIT_SUBMODULE_STATUS_INDEX_MODIFIED as u32, - #[allow(missing_docs)] - const SUBMODULE_STATUS_WD_UNINITIALIZED = - raw::GIT_SUBMODULE_STATUS_WD_UNINITIALIZED as u32, - #[allow(missing_docs)] - const SUBMODULE_STATUS_WD_ADDED = - raw::GIT_SUBMODULE_STATUS_WD_ADDED as u32, - #[allow(missing_docs)] - const SUBMODULE_STATUS_WD_DELETED = - raw::GIT_SUBMODULE_STATUS_WD_DELETED as u32, - #[allow(missing_docs)] - const SUBMODULE_STATUS_WD_MODIFIED = - raw::GIT_SUBMODULE_STATUS_WD_MODIFIED as u32, - #[allow(missing_docs)] - const 
SUBMODULE_STATUS_WD_INDEX_MODIFIED = - raw::GIT_SUBMODULE_STATUS_WD_INDEX_MODIFIED as u32, - #[allow(missing_docs)] - const SUBMODULE_STATUS_WD_WD_MODIFIED = - raw::GIT_SUBMODULE_STATUS_WD_WD_MODIFIED as u32, - #[allow(missing_docs)] - const SUBMODULE_STATUS_WD_UNTRACKED = - raw::GIT_SUBMODULE_STATUS_WD_UNTRACKED as u32, - } - -} - -/// Submodule ignore values -/// -/// These values represent settings for the `submodule.$name.ignore` -/// configuration value which says how deeply to look at the working -/// directory when getting the submodule status. -pub enum SubmoduleIgnore { - /// Use the submodule's configuration - Unspecified, - /// Any change or untracked file is considered dirty - None, - /// Only dirty if tracked files have changed - Untracked, - /// Only dirty if HEAD has moved - Dirty, - /// Never dirty - All, -} - -bitflags! { - /// ... - pub flags PathspecFlags: u32 { - /// Use the default pathspec matching configuration. - const PATHSPEC_DEFAULT = raw::GIT_PATHSPEC_DEFAULT as u32, - /// Force matching to ignore case, otherwise matching will use native - /// case sensitivity fo the platform filesystem. - const PATHSPEC_IGNORE_CASE = raw::GIT_PATHSPEC_IGNORE_CASE as u32, - /// Force case sensitive matches, otherwise match will use the native - /// case sensitivity of the platform filesystem. - const PATHSPEC_USE_CASE = raw::GIT_PATHSPEC_USE_CASE as u32, - /// Disable glob patterns and just use simple string comparison for - /// matching. - const PATHSPEC_NO_GLOB = raw::GIT_PATHSPEC_NO_GLOB as u32, - /// Means that match functions return the error code `NotFound` if no - /// matches are found. By default no matches is a success. - const PATHSPEC_NO_MATCH_ERROR = raw::GIT_PATHSPEC_NO_MATCH_ERROR as u32, - /// Means that the list returned should track which patterns matched - /// which files so that at the end of the match we can identify patterns - /// that did not match any files. 
- const PATHSPEC_FIND_FAILURES = raw::GIT_PATHSPEC_FIND_FAILURES as u32, - /// Means that the list returned does not need to keep the actual - /// matching filenames. Use this to just test if there were any matches - /// at all or in combination with `PATHSPEC_FAILURES` to validate a - /// pathspec. - const PATHSPEC_FAILURES_ONLY = raw::GIT_PATHSPEC_FAILURES_ONLY as u32, - } -} - -bitflags! { - /// Types of notifications emitted from checkouts. - pub flags CheckoutNotificationType: u32 { - /// Notification about a conflict. - const CHECKOUT_NOTIFICATION_CONFLICT = raw::GIT_CHECKOUT_NOTIFY_CONFLICT as u32, - /// Notification about a dirty file. - const CHECKOUT_NOTIFICATION_DIRTY = raw::GIT_CHECKOUT_NOTIFY_DIRTY as u32, - /// Notification about an updated file. - const CHECKOUT_NOTIFICATION_UPDATED = raw::GIT_CHECKOUT_NOTIFY_UPDATED as u32, - /// Notification about an untracked file. - const CHECKOUT_NOTIFICATION_UNTRACKED = raw::GIT_CHECKOUT_NOTIFY_UNTRACKED as u32, - /// Notification about an ignored file. - const CHECKOUT_NOTIFICATION_IGNORED = raw::GIT_CHECKOUT_NOTIFY_IGNORED as u32, - } -} - -/// Possible output formats for diff data -#[derive(Copy, Clone)] -pub enum DiffFormat { - /// full git diff - Patch, - /// just the headers of the patch - PatchHeader, - /// like git diff --raw - Raw, - /// like git diff --name-only - NameOnly, - /// like git diff --name-status - NameStatus, -} - -bitflags! 
{ - /// Formatting options for diff stats - pub flags DiffStatsFormat: raw::git_diff_stats_format_t { - /// Don't generate any stats - const DIFF_STATS_NONE = raw::GIT_DIFF_STATS_NONE, - /// Equivalent of `--stat` in git - const DIFF_STATS_FULL = raw::GIT_DIFF_STATS_FULL, - /// Equivalent of `--shortstat` in git - const DIFF_STATS_SHORT = raw::GIT_DIFF_STATS_SHORT, - /// Equivalent of `--numstat` in git - const DIFF_STATS_NUMBER = raw::GIT_DIFF_STATS_NUMBER, - /// Extended header information such as creations, renames and mode - /// changes, equivalent of `--summary` in git - const DIFF_STATS_INCLUDE_SUMMARY = - raw::GIT_DIFF_STATS_INCLUDE_SUMMARY, - } -} - -/// Automatic tag following options. -pub enum AutotagOption { - /// Use the setting from the remote's configuration - Unspecified, - /// Ask the server for tags pointing to objects we're already downloading - Auto, - /// Don't ask for any tags beyond the refspecs - None, - /// Ask for all the tags - All, -} - -/// Configuration for how pruning is done on a fetch -pub enum FetchPrune { - /// Use the setting from the configuration - Unspecified, - /// Force pruning on - On, - /// Force pruning off - Off, -} - -#[cfg(test)] -mod tests { - use super::ObjectType; - - #[test] - fn convert() { - assert_eq!(ObjectType::Blob.str(), "blob"); - assert_eq!(ObjectType::from_str("blob"), Some(ObjectType::Blob)); - assert!(ObjectType::Blob.is_loose()); - } - -} diff -Nru cargo-0.17.0/vendor/git2-0.6.3/src/merge.rs cargo-0.19.0/vendor/git2-0.6.3/src/merge.rs --- cargo-0.17.0/vendor/git2-0.6.3/src/merge.rs 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/git2-0.6.3/src/merge.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,154 +0,0 @@ -use std::marker; -use std::mem; -use libc::c_uint; - -use {raw, Oid, Commit, FileFavor}; -use util::Binding; -use call::Convert; - -/// A structure to represent an annotated commit, the input to merge and rebase. 
-/// -/// An annotated commit contains information about how it was looked up, which -/// may be useful for functions like merge or rebase to provide context to the -/// operation. -pub struct AnnotatedCommit<'repo> { - raw: *mut raw::git_annotated_commit, - _marker: marker::PhantomData>, -} - -/// Options to specify when merging. -pub struct MergeOptions { - raw: raw::git_merge_options, -} - -impl<'repo> AnnotatedCommit<'repo> { - /// Gets the commit ID that the given git_annotated_commit refers to - pub fn id(&self) -> Oid { - unsafe { Binding::from_raw(raw::git_annotated_commit_id(self.raw)) } - } -} - -impl MergeOptions { - /// Creates a default set of merge options. - pub fn new() -> MergeOptions { - let mut opts = MergeOptions { - raw: unsafe { mem::zeroed() }, - }; - assert_eq!(unsafe { - raw::git_merge_init_options(&mut opts.raw, 1) - }, 0); - opts - } - - /// Detect file renames - pub fn find_renames(&mut self, find: bool) -> &mut MergeOptions { - if find { - self.raw.flags |= raw::GIT_MERGE_FIND_RENAMES; - } else { - self.raw.flags &= !raw::GIT_MERGE_FIND_RENAMES; - } - self - } - - /// Similarity to consider a file renamed (default 50) - pub fn rename_threshold(&mut self, thresh: u32) -> &mut MergeOptions { - self.raw.rename_threshold = thresh; - self - } - - /// Maximum similarity sources to examine for renames (default 200). - /// If the number of rename candidates (add / delete pairs) is greater - /// than this value, inexact rename detection is aborted. This setting - /// overrides the `merge.renameLimit` configuration value. - pub fn target_limit(&mut self, limit: u32) -> &mut MergeOptions { - self.raw.target_limit = limit as c_uint; - self - } - - /// Maximum number of times to merge common ancestors to build a - /// virtual merge base when faced with criss-cross merges. When - /// this limit is reached, the next ancestor will simply be used - /// instead of attempting to merge it. The default is unlimited. 
- pub fn recursion_limit(&mut self, limit: u32) -> &mut MergeOptions { - self.raw.recursion_limit = limit as c_uint; - self - } - - /// Specify a side to favor for resolving conflicts - pub fn file_favor(&mut self, favor: FileFavor) -> &mut MergeOptions { - self.raw.file_favor = favor.convert(); - self - } - - fn flag(&mut self, opt: raw::git_merge_file_flag_t, val: bool) -> &mut MergeOptions { - if val { - self.raw.file_flags |= opt; - } else { - self.raw.file_flags &= !opt; - } - self - } - - /// Create standard conflicted merge files - pub fn standard_style(&mut self, standard: bool) -> &mut MergeOptions { - self.flag(raw::GIT_MERGE_FILE_STYLE_MERGE, standard) - } - - /// Create diff3-style file - pub fn diff3_style(&mut self, diff3: bool) -> &mut MergeOptions { - self.flag(raw::GIT_MERGE_FILE_STYLE_DIFF3, diff3) - } - - /// Condense non-alphanumeric regions for simplified diff file - pub fn simplify_alnum(&mut self, simplify: bool) -> &mut MergeOptions { - self.flag(raw::GIT_MERGE_FILE_SIMPLIFY_ALNUM, simplify) - } - - /// Ignore all whitespace - pub fn ignore_whitespace(&mut self, ignore: bool) -> &mut MergeOptions { - self.flag(raw::GIT_MERGE_FILE_IGNORE_WHITESPACE, ignore) - } - - /// Ignore changes in amount of whitespace - pub fn ignore_whitespace_change(&mut self, ignore: bool) -> &mut MergeOptions { - self.flag(raw::GIT_MERGE_FILE_IGNORE_WHITESPACE_CHANGE, ignore) - } - - /// Ignore whitespace at end of line - pub fn ignore_whitespace_eol(&mut self, ignore: bool) -> &mut MergeOptions { - self.flag(raw::GIT_MERGE_FILE_IGNORE_WHITESPACE_EOL, ignore) - } - - /// Use the "patience diff" algorithm - pub fn patience(&mut self, patience: bool) -> &mut MergeOptions { - self.flag(raw::GIT_MERGE_FILE_DIFF_PATIENCE, patience) - } - - /// Take extra time to find minimal diff - pub fn minimal(&mut self, minimal: bool) -> &mut MergeOptions { - self.flag(raw::GIT_MERGE_FILE_DIFF_MINIMAL, minimal) - } - - /// Acquire a pointer to the underlying raw options. 
- pub unsafe fn raw(&self) -> *const raw::git_merge_options { - &self.raw as *const _ - } -} - -impl<'repo> Binding for AnnotatedCommit<'repo> { - type Raw = *mut raw::git_annotated_commit; - unsafe fn from_raw(raw: *mut raw::git_annotated_commit) - -> AnnotatedCommit<'repo> { - AnnotatedCommit { - raw: raw, - _marker: marker::PhantomData, - } - } - fn raw(&self) -> *mut raw::git_annotated_commit { self.raw } -} - -impl<'repo> Drop for AnnotatedCommit<'repo> { - fn drop(&mut self) { - unsafe { raw::git_annotated_commit_free(self.raw) } - } -} diff -Nru cargo-0.17.0/vendor/git2-0.6.3/src/message.rs cargo-0.19.0/vendor/git2-0.6.3/src/message.rs --- cargo-0.17.0/vendor/git2-0.6.3/src/message.rs 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/git2-0.6.3/src/message.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,52 +0,0 @@ -use std::ffi::CString; - -use libc::{c_char, c_int}; - -use {raw, Buf, Error, IntoCString}; -use util::Binding; - -/// Clean up a message, removing extraneous whitespace, and ensure that the -/// message ends with a newline. If comment_char is Some, also remove comment -/// lines starting with that character. 
-pub fn message_prettify(message: T, comment_char: Option) - -> Result { - _message_prettify(try!(message.into_c_string()), comment_char) -} - -fn _message_prettify(message: CString, comment_char: Option) - -> Result { - let ret = Buf::new(); - unsafe { - try_call!(raw::git_message_prettify(ret.raw(), message, - comment_char.is_some() as c_int, - comment_char.unwrap_or(0) as c_char)); - } - Ok(ret.as_str().unwrap().to_string()) -} - -/// The default comment character for message_prettify ('#') -pub const DEFAULT_COMMENT_CHAR: Option = Some('#' as u8); - -#[cfg(test)] -mod tests { - use {message_prettify, DEFAULT_COMMENT_CHAR}; - - #[test] - fn prettify() { - // This does not attempt to duplicate the extensive tests for - // git_message_prettify in libgit2, just a few representative values to - // make sure the interface works as expected. - assert_eq!(message_prettify("1\n\n\n2", None).unwrap(), - "1\n\n2\n"); - assert_eq!(message_prettify("1\n\n\n2\n\n\n3", None).unwrap(), - "1\n\n2\n\n3\n"); - assert_eq!(message_prettify("1\n# comment\n# more", None).unwrap(), - "1\n# comment\n# more\n"); - assert_eq!(message_prettify("1\n# comment\n# more", - DEFAULT_COMMENT_CHAR).unwrap(), - "1\n"); - assert_eq!(message_prettify("1\n; comment\n; more", - Some(';' as u8)).unwrap(), - "1\n"); - } -} diff -Nru cargo-0.17.0/vendor/git2-0.6.3/src/note.rs cargo-0.19.0/vendor/git2-0.6.3/src/note.rs --- cargo-0.17.0/vendor/git2-0.6.3/src/note.rs 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/git2-0.6.3/src/note.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,125 +0,0 @@ -use std::marker; -use std::str; - -use {raw, signature, Signature, Oid, Repository, Error}; -use util::Binding; - -/// A structure representing a [note][note] in git. -/// -/// [note]: http://git-scm.com/blog/2010/08/25/notes.html -pub struct Note<'repo> { - raw: *mut raw::git_note, - - // Hmm, the current libgit2 version does not have this inside of it, but - // perhaps it's a good idea to keep it around? 
Can always remove it later I - // suppose... - _marker: marker::PhantomData<&'repo Repository>, -} - -/// An iterator over all of the notes within a repository. -pub struct Notes<'repo> { - raw: *mut raw::git_note_iterator, - _marker: marker::PhantomData<&'repo Repository>, -} - -impl<'repo> Note<'repo> { - /// Get the note author - pub fn author(&self) -> Signature { - unsafe { - signature::from_raw_const(self, raw::git_note_author(&*self.raw)) - } - } - - /// Get the note committer - pub fn committer(&self) -> Signature { - unsafe { - signature::from_raw_const(self, raw::git_note_committer(&*self.raw)) - } - } - - /// Get the note message, in bytes. - pub fn message_bytes(&self) -> &[u8] { - unsafe { ::opt_bytes(self, raw::git_note_message(&*self.raw)).unwrap() } - } - - /// Get the note message as a string, returning `None` if it is not UTF-8. - pub fn message(&self) -> Option<&str> { - str::from_utf8(self.message_bytes()).ok() - } - - /// Get the note object's id - pub fn id(&self) -> Oid { - unsafe { Binding::from_raw(raw::git_note_id(&*self.raw)) } - } -} - -impl<'repo> Binding for Note<'repo> { - type Raw = *mut raw::git_note; - unsafe fn from_raw(raw: *mut raw::git_note) -> Note<'repo> { - Note { raw: raw, _marker: marker::PhantomData, } - } - fn raw(&self) -> *mut raw::git_note { self.raw } -} - - -impl<'repo> Drop for Note<'repo> { - fn drop(&mut self) { - unsafe { raw::git_note_free(self.raw); } - } -} - -impl<'repo> Binding for Notes<'repo> { - type Raw = *mut raw::git_note_iterator; - unsafe fn from_raw(raw: *mut raw::git_note_iterator) -> Notes<'repo> { - Notes { raw: raw, _marker: marker::PhantomData, } - } - fn raw(&self) -> *mut raw::git_note_iterator { self.raw } -} - -impl<'repo> Iterator for Notes<'repo> { - type Item = Result<(Oid, Oid), Error>; - fn next(&mut self) -> Option> { - let mut note_id = raw::git_oid { id: [0; raw::GIT_OID_RAWSZ] }; - let mut annotated_id = note_id; - unsafe { - try_call_iter!(raw::git_note_next(&mut note_id, &mut 
annotated_id, - self.raw)); - Some(Ok((Binding::from_raw(¬e_id as *const _), - Binding::from_raw(&annotated_id as *const _)))) - } - } -} - -impl<'repo> Drop for Notes<'repo> { - fn drop(&mut self) { - unsafe { raw::git_note_iterator_free(self.raw); } - } -} - -#[cfg(test)] -mod tests { - #[test] - fn smoke() { - let (_td, repo) = ::test::repo_init(); - assert!(repo.notes(None).is_err()); - - let sig = repo.signature().unwrap(); - let head = repo.head().unwrap().target().unwrap(); - let note = repo.note(&sig, &sig, None, head, "foo", false).unwrap(); - assert_eq!(repo.notes(None).unwrap().count(), 1); - - let note_obj = repo.find_note(None, head).unwrap(); - assert_eq!(note_obj.id(), note); - assert_eq!(note_obj.message(), Some("foo")); - - let (a, b) = repo.notes(None).unwrap().next().unwrap().unwrap(); - assert_eq!(a, note); - assert_eq!(b, head); - - assert_eq!(repo.note_default_ref().unwrap(), "refs/notes/commits"); - - assert_eq!(sig.name(), note_obj.author().name()); - assert_eq!(sig.name(), note_obj.committer().name()); - assert!(sig.when() == note_obj.committer().when()); - } -} diff -Nru cargo-0.17.0/vendor/git2-0.6.3/src/object.rs cargo-0.19.0/vendor/git2-0.6.3/src/object.rs --- cargo-0.17.0/vendor/git2-0.6.3/src/object.rs 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/git2-0.6.3/src/object.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,174 +0,0 @@ -use std::marker; -use std::mem; -use std::ptr; - -use {raw, Oid, ObjectType, Error, Buf, Commit, Tag, Blob, Tree, Repository}; -use {Describe, DescribeOptions}; -use util::Binding; - -/// A structure to represent a git [object][1] -/// -/// [1]: http://git-scm.com/book/en/Git-Internals-Git-Objects -pub struct Object<'repo> { - raw: *mut raw::git_object, - _marker: marker::PhantomData<&'repo Repository>, -} - -impl<'repo> Object<'repo> { - /// Get the id (SHA1) of a repository object - pub fn id(&self) -> Oid { - unsafe { - Binding::from_raw(raw::git_object_id(&*self.raw)) - } - } - - /// Get the 
object type of an object. - /// - /// If the type is unknown, then `None` is returned. - pub fn kind(&self) -> Option { - ObjectType::from_raw(unsafe { raw::git_object_type(&*self.raw) }) - } - - /// Recursively peel an object until an object of the specified type is met. - /// - /// If you pass `Any` as the target type, then the object will be - /// peeled until the type changes (e.g. a tag will be chased until the - /// referenced object is no longer a tag). - pub fn peel(&self, kind: ObjectType) -> Result, Error> { - let mut raw = 0 as *mut raw::git_object; - unsafe { - try_call!(raw::git_object_peel(&mut raw, &*self.raw(), kind)); - Ok(Binding::from_raw(raw)) - } - } - - /// Get a short abbreviated OID string for the object - /// - /// This starts at the "core.abbrev" length (default 7 characters) and - /// iteratively extends to a longer string if that length is ambiguous. The - /// result will be unambiguous (at least until new objects are added to the - /// repository). - pub fn short_id(&self) -> Result { - unsafe { - let buf = Buf::new(); - try_call!(raw::git_object_short_id(buf.raw(), &*self.raw())); - Ok(buf) - } - } - - /// Attempt to view this object as a commit. - /// - /// Returns `None` if the object is not actually a commit. - pub fn as_commit(&self) -> Option<&Commit<'repo>> { - self.cast(ObjectType::Commit) - } - - /// Attempt to consume this object and return a commit. - /// - /// Returns `Err(self)` if this object is not actually a commit. - pub fn into_commit(self) -> Result, Object<'repo>> { - self.cast_into(ObjectType::Commit) - } - - /// Attempt to view this object as a tag. - /// - /// Returns `None` if the object is not actually a tag. - pub fn as_tag(&self) -> Option<&Tag<'repo>> { - self.cast(ObjectType::Tag) - } - - /// Attempt to consume this object and return a tag. - /// - /// Returns `Err(self)` if this object is not actually a tag. 
- pub fn into_tag(self) -> Result, Object<'repo>> { - self.cast_into(ObjectType::Tag) - } - - /// Attempt to view this object as a tree. - /// - /// Returns `None` if the object is not actually a tree. - pub fn as_tree(&self) -> Option<&Tree<'repo>> { - self.cast(ObjectType::Tree) - } - - /// Attempt to consume this object and return a tree. - /// - /// Returns `Err(self)` if this object is not actually a tree. - pub fn into_tree(self) -> Result, Object<'repo>> { - self.cast_into(ObjectType::Tree) - } - - /// Attempt to view this object as a blob. - /// - /// Returns `None` if the object is not actually a blob. - pub fn as_blob(&self) -> Option<&Blob<'repo>> { - self.cast(ObjectType::Blob) - } - - /// Attempt to consume this object and return a blob. - /// - /// Returns `Err(self)` if this object is not actually a blob. - pub fn into_blob(self) -> Result, Object<'repo>> { - self.cast_into(ObjectType::Blob) - } - - /// Describes a commit - /// - /// Performs a describe operation on this commitish object. 
- pub fn describe(&self, opts: &DescribeOptions) - -> Result { - let mut ret = 0 as *mut _; - unsafe { - try_call!(raw::git_describe_commit(&mut ret, self.raw, opts.raw())); - Ok(Binding::from_raw(ret)) - } - } - - fn cast(&self, kind: ObjectType) -> Option<&T> { - assert_eq!(mem::size_of::(), mem::size_of::()); - if self.kind() == Some(kind) { - unsafe { Some(&*(self as *const _ as *const T)) } - } else { - None - } - } - - fn cast_into(self, kind: ObjectType) -> Result> { - assert_eq!(mem::size_of_val(&self), mem::size_of::()); - if self.kind() == Some(kind) { - Ok(unsafe { - let other = ptr::read(&self as *const _ as *const T); - mem::forget(self); - other - }) - } else { - Err(self) - } - } -} - -impl<'repo> Clone for Object<'repo> { - fn clone(&self) -> Object<'repo> { - let mut raw = 0 as *mut raw::git_object; - unsafe { - let rc = raw::git_object_dup(&mut raw, self.raw); - assert_eq!(rc, 0); - Binding::from_raw(raw) - } - } -} - -impl<'repo> Binding for Object<'repo> { - type Raw = *mut raw::git_object; - - unsafe fn from_raw(raw: *mut raw::git_object) -> Object<'repo> { - Object { raw: raw, _marker: marker::PhantomData, } - } - fn raw(&self) -> *mut raw::git_object { self.raw } -} - -impl<'repo> Drop for Object<'repo> { - fn drop(&mut self) { - unsafe { raw::git_object_free(self.raw) } - } -} diff -Nru cargo-0.17.0/vendor/git2-0.6.3/src/oid_array.rs cargo-0.19.0/vendor/git2-0.6.3/src/oid_array.rs --- cargo-0.17.0/vendor/git2-0.6.3/src/oid_array.rs 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/git2-0.6.3/src/oid_array.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,44 +0,0 @@ -//! Bindings to libgit2's raw git_strarray type - -use std::ops::Deref; - -use oid::Oid; -use raw; -use util::Binding; -use std::slice; -use std::mem; - -/// An oid array structure used by libgit2 -/// -/// Some apis return arrays of oids which originate from libgit2. 
This -/// wrapper type behaves a little like `Vec<&Oid>` but does so without copying -/// the underlying Oids until necessary. -pub struct OidArray { - raw: raw::git_oidarray, -} - -impl Deref for OidArray { - type Target = [Oid]; - - fn deref(&self) -> &[Oid] { - unsafe { - debug_assert_eq!(mem::size_of::(), mem::size_of_val(&*self.raw.ids)); - - slice::from_raw_parts(self.raw.ids as *const Oid, self.raw.count as usize) - } - } -} - -impl Binding for OidArray { - type Raw = raw::git_oidarray; - unsafe fn from_raw(raw: raw::git_oidarray) -> OidArray { - OidArray { raw: raw } - } - fn raw(&self) -> raw::git_oidarray { self.raw } -} - -impl Drop for OidArray { - fn drop(&mut self) { - unsafe { raw::git_oidarray_free(&mut self.raw) } - } -} diff -Nru cargo-0.17.0/vendor/git2-0.6.3/src/oid.rs cargo-0.19.0/vendor/git2-0.6.3/src/oid.rs --- cargo-0.17.0/vendor/git2-0.6.3/src/oid.rs 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/git2-0.6.3/src/oid.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,144 +0,0 @@ -use std::fmt; -use std::cmp::Ordering; -use std::hash::{Hasher, Hash}; -use std::str; -use libc; - -use {raw, Error}; -use util::Binding; - -/// Unique identity of any object (commit, tree, blob, tag). -#[derive(Copy)] -pub struct Oid { - raw: raw::git_oid, -} - -impl Oid { - /// Parse a hex-formatted object id into an Oid structure. - /// - /// If the string is not a valid 40-character hex string, an error is - /// returned. - pub fn from_str(s: &str) -> Result { - ::init(); - let mut raw = raw::git_oid { id: [0; raw::GIT_OID_RAWSZ] }; - unsafe { - try_call!(raw::git_oid_fromstrn(&mut raw, - s.as_bytes().as_ptr() - as *const libc::c_char, - s.len() as libc::size_t)); - } - Ok(Oid { raw: raw }) - } - - /// Parse a raw object id into an Oid structure. - /// - /// If the array given is not 20 bytes in length, an error is returned. 
- pub fn from_bytes(bytes: &[u8]) -> Result { - ::init(); - let mut raw = raw::git_oid { id: [0; raw::GIT_OID_RAWSZ] }; - if bytes.len() != raw::GIT_OID_RAWSZ { - Err(Error::from_str("raw byte array must be 20 bytes")) - } else { - unsafe { raw::git_oid_fromraw(&mut raw, bytes.as_ptr()) } - Ok(Oid { raw: raw }) - } - } - - /// View this OID as a byte-slice 20 bytes in length. - pub fn as_bytes(&self) -> &[u8] { &self.raw.id } - - /// Test if this OID is all zeros. - pub fn is_zero(&self) -> bool { - unsafe { raw::git_oid_iszero(&self.raw) == 1 } - } -} - -impl Binding for Oid { - type Raw = *const raw::git_oid; - - unsafe fn from_raw(oid: *const raw::git_oid) -> Oid { - Oid { raw: *oid } - } - fn raw(&self) -> *const raw::git_oid { &self.raw as *const _ } -} - -impl fmt::Debug for Oid { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - fmt::Display::fmt(self, f) - } -} - -impl fmt::Display for Oid { - /// Hex-encode this Oid into a formatter. - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - let mut dst = [0u8; raw::GIT_OID_HEXSZ + 1]; - unsafe { - raw::git_oid_tostr(dst.as_mut_ptr() as *mut libc::c_char, - dst.len() as libc::size_t, &self.raw); - } - let s = &dst[..dst.iter().position(|&a| a == 0).unwrap()]; - str::from_utf8(s).unwrap().fmt(f) - } -} - -impl str::FromStr for Oid { - type Err = Error; - - /// Parse a hex-formatted object id into an Oid structure. - /// - /// If the string is not a valid 40-character hex string, an error is - /// returned. 
- fn from_str(s: &str) -> Result { - Oid::from_str(s) - } -} - -impl PartialEq for Oid { - fn eq(&self, other: &Oid) -> bool { - unsafe { raw::git_oid_equal(&self.raw, &other.raw) != 0 } - } -} -impl Eq for Oid {} - -impl PartialOrd for Oid { - fn partial_cmp(&self, other: &Oid) -> Option { - Some(self.cmp(other)) - } -} - -impl Ord for Oid { - fn cmp(&self, other: &Oid) -> Ordering { - match unsafe { raw::git_oid_cmp(&self.raw, &other.raw) } { - 0 => Ordering::Equal, - n if n < 0 => Ordering::Less, - _ => Ordering::Greater, - } - } -} - -impl Clone for Oid { - fn clone(&self) -> Oid { *self } -} - -impl Hash for Oid { - fn hash(&self, into: &mut H) { - self.raw.id.hash(into) - } -} - -impl AsRef<[u8]> for Oid { - fn as_ref(&self) -> &[u8] { self.as_bytes() } -} - -#[cfg(test)] -mod tests { - use super::Oid; - - #[test] - fn conversions() { - assert!(Oid::from_str("foo").is_err()); - assert!(Oid::from_str("decbf2be529ab6557d5429922251e5ee36519817").is_ok()); - assert!(Oid::from_bytes(b"foo").is_err()); - assert!(Oid::from_bytes(b"00000000000000000000").is_ok()); - } -} diff -Nru cargo-0.17.0/vendor/git2-0.6.3/src/packbuilder.rs cargo-0.19.0/vendor/git2-0.6.3/src/packbuilder.rs --- cargo-0.17.0/vendor/git2-0.6.3/src/packbuilder.rs 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/git2-0.6.3/src/packbuilder.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,386 +0,0 @@ -use std::marker; -use std::ptr; -use std::slice; -use libc::{c_int, c_uint, c_void, size_t}; - -use {raw, panic, Repository, Error, Oid, Revwalk, Buf}; -use util::Binding; - -/// Stages that are reported by the PackBuilder progress callback. 
-pub enum PackBuilderStage { - /// Adding objects to the pack - AddingObjects, - /// Deltafication of the pack - Deltafication, -} - -pub type ProgressCb<'a> = FnMut(PackBuilderStage, u32, u32) -> bool + 'a; -pub type ForEachCb<'a> = FnMut(&[u8]) -> bool + 'a; - -/// A builder for creating a packfile -pub struct PackBuilder<'repo> { - raw: *mut raw::git_packbuilder, - progress: Option>>>, - _marker: marker::PhantomData<&'repo Repository>, -} - -impl<'repo> PackBuilder<'repo> { - /// Insert a single object. For an optimal pack it's mandatory to insert - /// objects in recency order, commits followed by trees and blobs. - pub fn insert_object(&mut self, id: Oid, name: Option<&str>) - -> Result<(), Error> { - let name = try!(::opt_cstr(name)); - unsafe { - try_call!(raw::git_packbuilder_insert(self.raw, id.raw(), name)); - } - Ok(()) - } - - /// Insert a root tree object. This will add the tree as well as all - /// referenced trees and blobs. - pub fn insert_tree(&mut self, id: Oid) -> Result<(), Error> { - unsafe { - try_call!(raw::git_packbuilder_insert_tree(self.raw, id.raw())); - } - Ok(()) - } - - /// Insert a commit object. This will add a commit as well as the completed - /// referenced tree. - pub fn insert_commit(&mut self, id: Oid) -> Result<(), Error> { - unsafe { - try_call!(raw::git_packbuilder_insert_commit(self.raw, id.raw())); - } - Ok(()) - } - - /// Insert objects as given by the walk. Those commits and all objects they - /// reference will be inserted into the packbuilder. - pub fn insert_walk(&mut self, walk: &mut Revwalk) -> Result<(), Error> { - unsafe { - try_call!(raw::git_packbuilder_insert_walk(self.raw, walk.raw())); - } - Ok(()) - } - - /// Recursively insert an object and its referenced objects. Insert the - /// object as well as any object it references. 
- pub fn insert_recursive(&mut self, id: Oid, name: Option<&str>) - -> Result<(), Error> { - let name = try!(::opt_cstr(name)); - unsafe { - try_call!(raw::git_packbuilder_insert_recur(self.raw, - id.raw(), - name)); - } - Ok(()) - } - - /// Write the contents of the packfile to an in-memory buffer. The contents - /// of the buffer will become a valid packfile, even though there will be - /// no attached index. - pub fn write_buf(&mut self, buf: &mut Buf) -> Result<(), Error> { - unsafe { - try_call!(raw::git_packbuilder_write_buf(buf.raw(), self.raw)); - } - Ok(()) - } - - /// Create the new pack and pass each object to the callback. - pub fn foreach(&mut self, mut cb: F) -> Result<(), Error> - where F: FnMut(&[u8]) -> bool - { - let mut cb = &mut cb as &mut ForEachCb; - let ptr = &mut cb as *mut _; - unsafe { - try_call!(raw::git_packbuilder_foreach(self.raw, - foreach_c, - ptr as *mut _)); - } - Ok(()) - } - - /// `progress` will be called with progress information during pack - /// building. Be aware that this is called inline with pack building - /// operations, so performance may be affected. - /// - /// There can only be one progress callback attached, this will replace any - /// existing one. See `unset_progress_callback` to remove the current - /// progress callback without attaching a new one. - pub fn set_progress_callback(&mut self, progress: F) -> Result<(), Error> - where F: FnMut(PackBuilderStage, u32, u32) -> bool + 'repo - { - let mut progress = Box::new(Box::new(progress) as Box); - let ptr = &mut *progress as *mut _; - let progress_c = Some(progress_c as raw::git_packbuilder_progress); - unsafe { - try_call!(raw::git_packbuilder_set_callbacks(self.raw, - progress_c, - ptr as *mut _)); - } - self.progress = Some(progress); - Ok(()) - } - - /// Remove the current progress callback. See `set_progress_callback` to - /// set the progress callback. 
- pub fn unset_progress_callback(&mut self) -> Result<(), Error> { - unsafe { - try_call!(raw::git_packbuilder_set_callbacks(self.raw, - None, - ptr::null_mut())); - self.progress = None; - } - Ok(()) - } - - /// Get the total number of objects the packbuilder will write out. - pub fn object_count(&self) -> usize { - unsafe { raw::git_packbuilder_object_count(self.raw) } - } - - /// Get the number of objects the packbuilder has already written out. - pub fn written(&self) -> usize { - unsafe { raw::git_packbuilder_written(self.raw) } - } - - /// Get the packfile's hash. A packfile's name is derived from the sorted - /// hashing of all object names. This is only correct after the packfile - /// has been written. - pub fn hash(&self) -> Option { - if self.object_count() == 0 { - unsafe { - Some(Binding::from_raw(raw::git_packbuilder_hash(self.raw))) - } - } else { - None - } - } -} - -impl<'repo> Binding for PackBuilder<'repo> { - type Raw = *mut raw::git_packbuilder; - unsafe fn from_raw(ptr: *mut raw::git_packbuilder) -> PackBuilder<'repo> { - PackBuilder { - raw: ptr, - progress: None, - _marker: marker::PhantomData, - } - } - fn raw(&self) -> *mut raw::git_packbuilder { - self.raw - } -} - -impl<'repo> Drop for PackBuilder<'repo> { - fn drop(&mut self) { - unsafe { - raw::git_packbuilder_set_callbacks(self.raw, None, ptr::null_mut()); - raw::git_packbuilder_free(self.raw); - } - } -} - -impl Binding for PackBuilderStage { - type Raw = raw::git_packbuilder_stage_t; - unsafe fn from_raw(raw: raw::git_packbuilder_stage_t) -> PackBuilderStage { - match raw { - raw::GIT_PACKBUILDER_ADDING_OBJECTS => PackBuilderStage::AddingObjects, - raw::GIT_PACKBUILDER_DELTAFICATION => PackBuilderStage::Deltafication, - _ => panic!("Unknown git diff binary kind"), - } - } - fn raw(&self) -> raw::git_packbuilder_stage_t { - match *self { - PackBuilderStage::AddingObjects => raw::GIT_PACKBUILDER_ADDING_OBJECTS, - PackBuilderStage::Deltafication => raw::GIT_PACKBUILDER_DELTAFICATION, - 
} - } -} - -extern fn foreach_c(buf: *const c_void, - size: size_t, - data: *mut c_void) - -> c_int { - unsafe { - let buf = slice::from_raw_parts(buf as *const u8, size as usize); - - let r = panic::wrap(|| { - let data = data as *mut &mut ForEachCb; - (*data)(buf) - }); - if r == Some(true) { - 0 - } else { - -1 - } - } -} - -extern fn progress_c(stage: raw::git_packbuilder_stage_t, - current: c_uint, - total: c_uint, - data: *mut c_void) - -> c_int { - unsafe { - let stage = Binding::from_raw(stage); - - let r = panic::wrap(|| { - let data = data as *mut Box; - (*data)(stage, current, total) - }); - if r == Some(true) { - 0 - } else { - -1 - } - } -} - -#[cfg(test)] -mod tests { - use std::fs::File; - use std::path::Path; - use {Buf, Repository, Oid}; - - fn commit(repo: &Repository) -> (Oid, Oid) { - let mut index = t!(repo.index()); - let root = repo.path().parent().unwrap(); - t!(File::create(&root.join("foo"))); - t!(index.add_path(Path::new("foo"))); - - let tree_id = t!(index.write_tree()); - let tree = t!(repo.find_tree(tree_id)); - let sig = t!(repo.signature()); - let head_id = t!(repo.refname_to_id("HEAD")); - let parent = t!(repo.find_commit(head_id)); - let commit = t!(repo.commit(Some("HEAD"), - &sig, - &sig, - "commit", - &tree, - &[&parent])); - (commit, tree_id) - } - - fn pack_header(len: u8) -> Vec { - [].into_iter() - .chain(b"PACK") // signature - .chain(&[0, 0, 0, 2]) // version number - .chain(&[0, 0, 0, len]) // number of objects - .cloned().collect::>() - } - - fn empty_pack_header() -> Vec { - pack_header(0).iter() - .chain(&[0x02, 0x9d, 0x08, 0x82, 0x3b, // ^ - 0xd8, 0xa8, 0xea, 0xb5, 0x10, // | SHA-1 of the zero - 0xad, 0x6a, 0xc7, 0x5c, 0x82, // | object pack header - 0x3c, 0xfd, 0x3e, 0xd3, 0x1e]) // v - .cloned().collect::>() - } - - #[test] - fn smoke() { - let (_td, repo) = ::test::repo_init(); - let _builder = t!(repo.packbuilder()); - } - - #[test] - fn smoke_write_buf() { - let (_td, repo) = ::test::repo_init(); - let mut 
builder = t!(repo.packbuilder()); - let mut buf = Buf::new(); - t!(builder.write_buf(&mut buf)); - assert!(builder.hash().unwrap().is_zero()); - assert_eq!(&*buf, &*empty_pack_header()); - } - - #[test] - fn smoke_foreach() { - let (_td, repo) = ::test::repo_init(); - let mut builder = t!(repo.packbuilder()); - let mut buf = Vec::::new(); - t!(builder.foreach(|bytes| { - buf.extend(bytes); - true - })); - assert_eq!(&*buf, &*empty_pack_header()); - } - - #[test] - fn insert_write_buf() { - let (_td, repo) = ::test::repo_init(); - let mut builder = t!(repo.packbuilder()); - let mut buf = Buf::new(); - let (commit, _tree) = commit(&repo); - t!(builder.insert_object(commit, None)); - assert_eq!(builder.object_count(), 1); - t!(builder.write_buf(&mut buf)); - // Just check that the correct number of objects are written - assert_eq!(&buf[0..12], &*pack_header(1)); - } - - #[test] - fn insert_tree_write_buf() { - let (_td, repo) = ::test::repo_init(); - let mut builder = t!(repo.packbuilder()); - let mut buf = Buf::new(); - let (_commit, tree) = commit(&repo); - // will insert the tree itself and the blob, 2 objects - t!(builder.insert_tree(tree)); - assert_eq!(builder.object_count(), 2); - t!(builder.write_buf(&mut buf)); - // Just check that the correct number of objects are written - assert_eq!(&buf[0..12], &*pack_header(2)); - } - - #[test] - fn insert_commit_write_buf() { - let (_td, repo) = ::test::repo_init(); - let mut builder = t!(repo.packbuilder()); - let mut buf = Buf::new(); - let (commit, _tree) = commit(&repo); - // will insert the commit, its tree and the blob, 3 objects - t!(builder.insert_commit(commit)); - assert_eq!(builder.object_count(), 3); - t!(builder.write_buf(&mut buf)); - // Just check that the correct number of objects are written - assert_eq!(&buf[0..12], &*pack_header(3)); - } - - #[test] - fn progress_callback() { - let mut progress_called = false; - { - let (_td, repo) = ::test::repo_init(); - let mut builder = t!(repo.packbuilder()); - 
let (commit, _tree) = commit(&repo); - t!(builder.set_progress_callback(|_, _, _| { - progress_called = true; - true - })); - t!(builder.insert_commit(commit)); - t!(builder.write_buf(&mut Buf::new())); - } - assert_eq!(progress_called, true); - } - - #[test] - fn clear_progress_callback() { - let mut progress_called = false; - { - let (_td, repo) = ::test::repo_init(); - let mut builder = t!(repo.packbuilder()); - let (commit, _tree) = commit(&repo); - t!(builder.set_progress_callback(|_, _, _| { - progress_called = true; - true - })); - t!(builder.unset_progress_callback()); - t!(builder.insert_commit(commit)); - t!(builder.write_buf(&mut Buf::new())); - } - assert_eq!(progress_called, false); - } -} diff -Nru cargo-0.17.0/vendor/git2-0.6.3/src/panic.rs cargo-0.19.0/vendor/git2-0.6.3/src/panic.rs --- cargo-0.17.0/vendor/git2-0.6.3/src/panic.rs 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/git2-0.6.3/src/panic.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,55 +0,0 @@ -use std::any::Any; -use std::cell::RefCell; - -thread_local!(static LAST_ERROR: RefCell>> = { - RefCell::new(None) -}); - -#[cfg(feature = "unstable")] -pub fn wrap T + ::std::panic::UnwindSafe>(f: F) -> Option { - use std::panic; - if LAST_ERROR.with(|slot| slot.borrow().is_some()) { - return None - } - match panic::catch_unwind(f) { - Ok(ret) => Some(ret), - Err(e) => { - LAST_ERROR.with(move |slot| { - *slot.borrow_mut() = Some(e); - }); - None - } - } -} - -#[cfg(not(feature = "unstable"))] -pub fn wrap T>(f: F) -> Option { - struct Bomb { - enabled: bool, - } - impl Drop for Bomb { - fn drop(&mut self) { - if !self.enabled { - return - } - panic!("callback has panicked, and continuing to unwind into C \ - is not safe, so aborting the process"); - - } - } - let mut bomb = Bomb { enabled: true }; - let ret = Some(f()); - bomb.enabled = false; - return ret; -} - -pub fn check() { - let err = LAST_ERROR.with(|slot| slot.borrow_mut().take()); - if let Some(err) = err { - panic!(err) - } 
-} - -pub fn panicked() -> bool { - LAST_ERROR.with(|slot| slot.borrow().is_some()) -} diff -Nru cargo-0.17.0/vendor/git2-0.6.3/src/patch.rs cargo-0.19.0/vendor/git2-0.6.3/src/patch.rs --- cargo-0.17.0/vendor/git2-0.6.3/src/patch.rs 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/git2-0.6.3/src/patch.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,201 +0,0 @@ -use std::path::Path; -use libc::{c_char, c_int, c_void}; - -use {raw, Blob, Buf, Diff, DiffDelta, DiffHunk, DiffLine, DiffOptions, Error}; -use diff::{LineCb, print_cb}; -use util::{Binding, into_opt_c_string}; - -/// A structure representing the text changes in a single diff delta. -/// -/// This is an opaque structure. -pub struct Patch { - raw: *mut raw::git_patch, -} - -unsafe impl Send for Patch {} - -impl Binding for Patch { - type Raw = *mut raw::git_patch; - unsafe fn from_raw(raw: Self::Raw) -> Patch { - Patch { raw: raw } - } - fn raw(&self) -> Self::Raw { self.raw } -} - -impl Drop for Patch { - fn drop(&mut self) { - unsafe { raw::git_patch_free(self.raw) } - } -} - -impl Patch { - /// Return a Patch for one file in a Diff. - /// - /// Returns Ok(None) for an unchanged or binary file. - pub fn from_diff(diff: &Diff, idx: usize) -> Result, Error> { - let mut ret = 0 as *mut raw::git_patch; - unsafe { - try_call!(raw::git_patch_from_diff(&mut ret, diff.raw(), idx)); - Ok(Binding::from_raw_opt(ret)) - } - } - - /// Generate a Patch by diffing two blobs. - pub fn from_blobs(old_blob: &Blob, - old_path: Option<&Path>, - new_blob: &Blob, - new_path: Option<&Path>, - opts: Option<&mut DiffOptions>) - -> Result - { - let mut ret = 0 as *mut raw::git_patch; - let old_path = try!(into_opt_c_string(old_path)); - let new_path = try!(into_opt_c_string(new_path)); - unsafe { - try_call!(raw::git_patch_from_blobs(&mut ret, - old_blob.raw(), - old_path, - new_blob.raw(), - new_path, - opts.map(|s| s.raw()))); - Ok(Binding::from_raw(ret)) - } - } - - /// Generate a Patch by diffing a blob and a buffer. 
- pub fn from_blob_and_buffer(old_blob: &Blob, - old_path: Option<&Path>, - new_buffer: &[u8], - new_path: Option<&Path>, - opts: Option<&mut DiffOptions>) - -> Result - { - let mut ret = 0 as *mut raw::git_patch; - let old_path = try!(into_opt_c_string(old_path)); - let new_path = try!(into_opt_c_string(new_path)); - unsafe { - try_call!(raw::git_patch_from_blob_and_buffer(&mut ret, - old_blob.raw(), - old_path, - new_buffer.as_ptr() as *const c_char, - new_buffer.len(), - new_path, - opts.map(|s| s.raw()))); - Ok(Binding::from_raw(ret)) - } - } - - /// Generate a Patch by diffing two buffers. - pub fn from_buffers(old_buffer: &[u8], - old_path: Option<&Path>, - new_buffer: &[u8], - new_path: Option<&Path>, - opts: Option<&mut DiffOptions>) - -> Result - { - let mut ret = 0 as *mut raw::git_patch; - let old_path = try!(into_opt_c_string(old_path)); - let new_path = try!(into_opt_c_string(new_path)); - unsafe { - try_call!(raw::git_patch_from_buffers(&mut ret, - old_buffer.as_ptr() as *const c_void, - old_buffer.len(), - old_path, - new_buffer.as_ptr() as *const c_char, - new_buffer.len(), - new_path, - opts.map(|s| s.raw()))); - Ok(Binding::from_raw(ret)) - } - } - - /// Get the DiffDelta associated with the Patch. - pub fn delta(&self) -> DiffDelta { - unsafe { - Binding::from_raw(raw::git_patch_get_delta(self.raw) as *mut _) - } - } - - /// Get the number of hunks in the Patch. - pub fn num_hunks(&self) -> usize { - unsafe { - raw::git_patch_num_hunks(self.raw) - } - } - - /// Get the number of lines of context, additions, and deletions in the Patch. - pub fn line_stats(&self) -> Result<(usize, usize, usize), Error> { - let mut context = 0; - let mut additions = 0; - let mut deletions = 0; - unsafe { - try_call!(raw::git_patch_line_stats(&mut context, - &mut additions, - &mut deletions, - self.raw)); - } - Ok((context, additions, deletions)) - } - - /// Get a DiffHunk and its total line count from the Patch. 
- pub fn hunk(&mut self, hunk_idx: usize) -> Result<(DiffHunk, usize), Error> { - let mut ret = 0 as *const raw::git_diff_hunk; - let mut lines = 0; - unsafe { - try_call!(raw::git_patch_get_hunk(&mut ret, &mut lines, self.raw, hunk_idx)); - Ok((Binding::from_raw(ret), lines)) - } - } - - /// Get the number of lines in a hunk. - pub fn num_lines_in_hunk(&self, hunk_idx: usize) -> Result { - unsafe { - Ok(try_call!(raw::git_patch_num_lines_in_hunk(self.raw, hunk_idx)) as usize) - } - } - - /// Get a DiffLine from a hunk of the Patch. - pub fn line_in_hunk(&mut self, - hunk_idx: usize, - line_of_hunk: usize) -> Result { - let mut ret = 0 as *const raw::git_diff_line; - unsafe { - try_call!(raw::git_patch_get_line_in_hunk(&mut ret, - self.raw, - hunk_idx, - line_of_hunk)); - Ok(Binding::from_raw(ret)) - } - } - - /// Get the size of a Patch's diff data in bytes. - pub fn size(&self, - include_context: bool, - include_hunk_headers: bool, - include_file_headers: bool) -> usize { - unsafe { - raw::git_patch_size(self.raw, - include_context as c_int, - include_hunk_headers as c_int, - include_file_headers as c_int) - } - } - - /// Print the Patch to text via a callback. - pub fn print(&mut self, mut line_cb: &mut LineCb) -> Result<(), Error> { - let ptr = &mut line_cb as *mut _ as *mut c_void; - unsafe { - try_call!(raw::git_patch_print(self.raw, print_cb, ptr)); - return Ok(()) - } - } - - /// Get the Patch text as a Buf. 
- pub fn to_buf(&mut self) -> Result { - let buf = Buf::new(); - unsafe { - try_call!(raw::git_patch_to_buf(buf.raw(), self.raw)); - } - Ok(buf) - } -} diff -Nru cargo-0.17.0/vendor/git2-0.6.3/src/pathspec.rs cargo-0.19.0/vendor/git2-0.6.3/src/pathspec.rs --- cargo-0.17.0/vendor/git2-0.6.3/src/pathspec.rs 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/git2-0.6.3/src/pathspec.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,300 +0,0 @@ -use std::iter::IntoIterator; -use std::marker; -use std::ops::Range; -use std::path::Path; -use libc::size_t; - -use {raw, Error, Diff, Tree, PathspecFlags, Index, Repository, DiffDelta, IntoCString}; -use util::Binding; - -/// Structure representing a compiled pathspec used for matching against various -/// structures. -pub struct Pathspec { - raw: *mut raw::git_pathspec, -} - -/// List of filenames matching a pathspec. -pub struct PathspecMatchList<'ps> { - raw: *mut raw::git_pathspec_match_list, - _marker: marker::PhantomData<&'ps Pathspec>, -} - -/// Iterator over the matched paths in a pathspec. -pub struct PathspecEntries<'list> { - range: Range, - list: &'list PathspecMatchList<'list>, -} - -/// Iterator over the matching diff deltas. -pub struct PathspecDiffEntries<'list> { - range: Range, - list: &'list PathspecMatchList<'list>, -} - -/// Iterator over the failed list of pathspec items that did not match. -pub struct PathspecFailedEntries<'list> { - range: Range, - list: &'list PathspecMatchList<'list>, -} - -impl Pathspec { - /// Creates a new pathspec from a list of specs to match against. - pub fn new(specs: I) -> Result - where T: IntoCString, I: IntoIterator { - let (_a, _b, arr) = try!(::util::iter2cstrs(specs)); - unsafe { - let mut ret = 0 as *mut raw::git_pathspec; - try_call!(raw::git_pathspec_new(&mut ret, &arr)); - Ok(Binding::from_raw(ret)) - } - } - - /// Match a pathspec against files in a diff. 
- /// - /// The list returned contains the list of all matched filenames (unless you - /// pass `PATHSPEC_FAILURES_ONLY` in the flags) and may also contain the - /// list of pathspecs with no match if the `PATHSPEC_FIND_FAILURES` flag is - /// specified. - pub fn match_diff(&self, diff: &Diff, flags: PathspecFlags) - -> Result { - let mut ret = 0 as *mut raw::git_pathspec_match_list; - unsafe { - try_call!(raw::git_pathspec_match_diff(&mut ret, diff.raw(), - flags.bits(), self.raw)); - Ok(Binding::from_raw(ret)) - } - } - - /// Match a pathspec against files in a tree. - /// - /// The list returned contains the list of all matched filenames (unless you - /// pass `PATHSPEC_FAILURES_ONLY` in the flags) and may also contain the - /// list of pathspecs with no match if the `PATHSPEC_FIND_FAILURES` flag is - /// specified. - pub fn match_tree(&self, tree: &Tree, flags: PathspecFlags) - -> Result { - let mut ret = 0 as *mut raw::git_pathspec_match_list; - unsafe { - try_call!(raw::git_pathspec_match_tree(&mut ret, tree.raw(), - flags.bits(), self.raw)); - Ok(Binding::from_raw(ret)) - } - } - - /// This matches the pathspec against the files in the repository index. - /// - /// The list returned contains the list of all matched filenames (unless you - /// pass `PATHSPEC_FAILURES_ONLY` in the flags) and may also contain the - /// list of pathspecs with no match if the `PATHSPEC_FIND_FAILURES` flag is - /// specified. - pub fn match_index(&self, index: &Index, flags: PathspecFlags) - -> Result { - let mut ret = 0 as *mut raw::git_pathspec_match_list; - unsafe { - try_call!(raw::git_pathspec_match_index(&mut ret, index.raw(), - flags.bits(), self.raw)); - Ok(Binding::from_raw(ret)) - } - } - - /// Match a pathspec against the working directory of a repository. - /// - /// This matches the pathspec against the current files in the working - /// directory of the repository. It is an error to invoke this on a bare - /// repo. This handles git ignores (i.e. 
ignored files will not be - /// considered to match the pathspec unless the file is tracked in the - /// index). - /// - /// The list returned contains the list of all matched filenames (unless you - /// pass `PATHSPEC_FAILURES_ONLY` in the flags) and may also contain the - /// list of pathspecs with no match if the `PATHSPEC_FIND_FAILURES` flag is - /// specified. - pub fn match_workdir(&self, repo: &Repository, flags: PathspecFlags) - -> Result { - let mut ret = 0 as *mut raw::git_pathspec_match_list; - unsafe { - try_call!(raw::git_pathspec_match_workdir(&mut ret, repo.raw(), - flags.bits(), self.raw)); - Ok(Binding::from_raw(ret)) - } - } - - /// Try to match a path against a pathspec - /// - /// Unlike most of the other pathspec matching functions, this will not fall - /// back on the native case-sensitivity for your platform. You must - /// explicitly pass flags to control case sensitivity or else this will fall - /// back on being case sensitive. - pub fn matches_path(&self, path: &Path, flags: PathspecFlags) -> bool { - let path = path.into_c_string().unwrap(); - unsafe { - raw::git_pathspec_matches_path(&*self.raw, flags.bits(), - path.as_ptr()) == 1 - } - } -} - -impl Binding for Pathspec { - type Raw = *mut raw::git_pathspec; - - unsafe fn from_raw(raw: *mut raw::git_pathspec) -> Pathspec { - Pathspec { raw: raw } - } - fn raw(&self) -> *mut raw::git_pathspec { self.raw } -} - -impl Drop for Pathspec { - fn drop(&mut self) { - unsafe { raw::git_pathspec_free(self.raw) } - } -} - -impl<'ps> PathspecMatchList<'ps> { - fn entrycount(&self) -> usize { - unsafe { raw::git_pathspec_match_list_entrycount(&*self.raw) as usize } - } - - fn failed_entrycount(&self) -> usize { - unsafe { raw::git_pathspec_match_list_failed_entrycount(&*self.raw) as usize } - } - - /// Returns an iterator over the matching filenames in this list. 
- pub fn entries(&self) -> PathspecEntries { - let n = self.entrycount(); - let n = if n > 0 && self.entry(0).is_none() {0} else {n}; - PathspecEntries { range: 0..n, list: self } - } - - /// Get a matching filename by position. - /// - /// If this list was generated from a diff, then the return value will - /// always be `None. - pub fn entry(&self, i: usize) -> Option<&[u8]> { - unsafe { - let ptr = raw::git_pathspec_match_list_entry(&*self.raw, i as size_t); - ::opt_bytes(self, ptr) - } - } - - /// Returns an iterator over the matching diff entries in this list. - pub fn diff_entries(&self) -> PathspecDiffEntries { - let n = self.entrycount(); - let n = if n > 0 && self.diff_entry(0).is_none() {0} else {n}; - PathspecDiffEntries { range: 0..n, list: self } - } - - /// Get a matching diff delta by position. - /// - /// If the list was not generated from a diff, then the return value will - /// always be `None`. - pub fn diff_entry(&self, i: usize) -> Option { - unsafe { - let ptr = raw::git_pathspec_match_list_diff_entry(&*self.raw, - i as size_t); - Binding::from_raw_opt(ptr as *mut _) - } - } - - /// Returns an iterator over the non-matching entries in this list. - pub fn failed_entries(&self) -> PathspecFailedEntries { - let n = self.failed_entrycount(); - let n = if n > 0 && self.failed_entry(0).is_none() {0} else {n}; - PathspecFailedEntries { range: 0..n, list: self } - } - - /// Get an original pathspec string that had no matches. 
- pub fn failed_entry(&self, i: usize) -> Option<&[u8]> { - unsafe { - let ptr = raw::git_pathspec_match_list_failed_entry(&*self.raw, - i as size_t); - ::opt_bytes(self, ptr) - } - } -} - -impl<'ps> Binding for PathspecMatchList<'ps> { - type Raw = *mut raw::git_pathspec_match_list; - - unsafe fn from_raw(raw: *mut raw::git_pathspec_match_list) - -> PathspecMatchList<'ps> { - PathspecMatchList { raw: raw, _marker: marker::PhantomData } - } - fn raw(&self) -> *mut raw::git_pathspec_match_list { self.raw } -} - -impl<'ps> Drop for PathspecMatchList<'ps> { - fn drop(&mut self) { - unsafe { raw::git_pathspec_match_list_free(self.raw) } - } -} - -impl<'list> Iterator for PathspecEntries<'list> { - type Item = &'list [u8]; - fn next(&mut self) -> Option<&'list [u8]> { - self.range.next().and_then(|i| self.list.entry(i)) - } - fn size_hint(&self) -> (usize, Option) { self.range.size_hint() } -} -impl<'list> DoubleEndedIterator for PathspecEntries<'list> { - fn next_back(&mut self) -> Option<&'list [u8]> { - self.range.next_back().and_then(|i| self.list.entry(i)) - } -} -impl<'list> ExactSizeIterator for PathspecEntries<'list> {} - -impl<'list> Iterator for PathspecDiffEntries<'list> { - type Item = DiffDelta<'list>; - fn next(&mut self) -> Option> { - self.range.next().and_then(|i| self.list.diff_entry(i)) - } - fn size_hint(&self) -> (usize, Option) { self.range.size_hint() } -} -impl<'list> DoubleEndedIterator for PathspecDiffEntries<'list> { - fn next_back(&mut self) -> Option> { - self.range.next_back().and_then(|i| self.list.diff_entry(i)) - } -} -impl<'list> ExactSizeIterator for PathspecDiffEntries<'list> {} - -impl<'list> Iterator for PathspecFailedEntries<'list> { - type Item = &'list [u8]; - fn next(&mut self) -> Option<&'list [u8]> { - self.range.next().and_then(|i| self.list.failed_entry(i)) - } - fn size_hint(&self) -> (usize, Option) { self.range.size_hint() } -} -impl<'list> DoubleEndedIterator for PathspecFailedEntries<'list> { - fn next_back(&mut self) 
-> Option<&'list [u8]> { - self.range.next_back().and_then(|i| self.list.failed_entry(i)) - } -} -impl<'list> ExactSizeIterator for PathspecFailedEntries<'list> {} - -#[cfg(test)] -mod tests { - use PATHSPEC_DEFAULT; - use super::Pathspec; - use std::fs::File; - use std::path::Path; - - #[test] - fn smoke() { - let ps = Pathspec::new(["a"].iter()).unwrap(); - assert!(ps.matches_path(Path::new("a"), PATHSPEC_DEFAULT)); - assert!(ps.matches_path(Path::new("a/b"), PATHSPEC_DEFAULT)); - assert!(!ps.matches_path(Path::new("b"), PATHSPEC_DEFAULT)); - assert!(!ps.matches_path(Path::new("ab/c"), PATHSPEC_DEFAULT)); - - let (td, repo) = ::test::repo_init(); - let list = ps.match_workdir(&repo, PATHSPEC_DEFAULT).unwrap(); - assert_eq!(list.entries().len(), 0); - assert_eq!(list.diff_entries().len(), 0); - assert_eq!(list.failed_entries().len(), 0); - - File::create(&td.path().join("a")).unwrap(); - - let list = ps.match_workdir(&repo, ::PATHSPEC_FIND_FAILURES).unwrap(); - assert_eq!(list.entries().len(), 1); - assert_eq!(list.entries().next(), Some("a".as_bytes())); - } -} diff -Nru cargo-0.17.0/vendor/git2-0.6.3/src/proxy_options.rs cargo-0.19.0/vendor/git2-0.6.3/src/proxy_options.rs --- cargo-0.17.0/vendor/git2-0.6.3/src/proxy_options.rs 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/git2-0.6.3/src/proxy_options.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,58 +0,0 @@ -use std::ffi::CString; -use std::marker; - -use raw; -use util::Binding; - -/// Options which can be specified to various fetch operations. -pub struct ProxyOptions<'a> { - url: Option, - proxy_kind: raw::git_proxy_t, - _marker: marker::PhantomData<&'a i32>, -} - -impl<'a> ProxyOptions<'a> { - /// Creates a new set of proxy options ready to be configured. - pub fn new() -> ProxyOptions<'a> { - ProxyOptions { - url: None, - proxy_kind: raw::GIT_PROXY_NONE, - _marker: marker::PhantomData, - } - } - - /// Try to auto-detect the proxy from the git configuration. 
- /// - /// Note that this will override `url` specified before. - pub fn auto(&mut self) -> &mut Self { - self.proxy_kind = raw::GIT_PROXY_AUTO; - self - } - - /// Specify the exact URL of the proxy to use. - /// - /// Note that this will override `auto` specified before. - pub fn url(&mut self, url: &str) -> &mut Self { - self.proxy_kind = raw::GIT_PROXY_SPECIFIED; - self.url = Some(CString::new(url).unwrap()); - self - } -} - -impl<'a> Binding for ProxyOptions<'a> { - type Raw = raw::git_proxy_options; - unsafe fn from_raw(_raw: raw::git_proxy_options) -> ProxyOptions<'a> { - panic!("can't create proxy from raw options") - } - - fn raw(&self) -> raw::git_proxy_options { - raw::git_proxy_options { - version: raw::GIT_PROXY_OPTIONS_VERSION, - kind: self.proxy_kind, - url: self.url.as_ref().map(|s| s.as_ptr()).unwrap_or(0 as *const _), - credentials: None, - certificate_check: None, - payload: 0 as *mut _, - } - } -} diff -Nru cargo-0.17.0/vendor/git2-0.6.3/src/reference.rs cargo-0.19.0/vendor/git2-0.6.3/src/reference.rs --- cargo-0.17.0/vendor/git2-0.6.3/src/reference.rs 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/git2-0.6.3/src/reference.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,354 +0,0 @@ -use std::cmp::Ordering; -use std::ffi::CString; -use std::marker; -use std::mem; -use std::str; -use libc; - -use {raw, Error, Oid, Repository, Object, ObjectType}; -use util::Binding; - -struct Refdb<'repo>(&'repo Repository); - -/// A structure to represent a git [reference][1]. -/// -/// [1]: http://git-scm.com/book/en/Git-Internals-Git-References -pub struct Reference<'repo> { - raw: *mut raw::git_reference, - _marker: marker::PhantomData>, -} - -/// An iterator over the references in a repository. -pub struct References<'repo> { - raw: *mut raw::git_reference_iterator, - _marker: marker::PhantomData>, -} - -/// An iterator over the names of references in a repository. 
-pub struct ReferenceNames<'repo> { - inner: References<'repo>, -} - -impl<'repo> Reference<'repo> { - /// Ensure the reference name is well-formed. - pub fn is_valid_name(refname: &str) -> bool { - ::init(); - let refname = CString::new(refname).unwrap(); - unsafe { raw::git_reference_is_valid_name(refname.as_ptr()) == 1 } - } - - /// Get access to the underlying raw pointer. - pub fn raw(&self) -> *mut raw::git_reference { self.raw } - - /// Delete an existing reference. - /// - /// This method works for both direct and symbolic references. The reference - /// will be immediately removed on disk. - /// - /// This function will return an error if the reference has changed from the - /// time it was looked up. - pub fn delete(&mut self) -> Result<(), Error> { - unsafe { try_call!(raw::git_reference_delete(self.raw)); } - Ok(()) - } - - /// Check if a reference is a local branch. - pub fn is_branch(&self) -> bool { - unsafe { raw::git_reference_is_branch(&*self.raw) == 1 } - } - - /// Check if a reference is a note. - pub fn is_note(&self) -> bool { - unsafe { raw::git_reference_is_note(&*self.raw) == 1 } - } - - /// Check if a reference is a remote tracking branch - pub fn is_remote(&self) -> bool { - unsafe { raw::git_reference_is_remote(&*self.raw) == 1 } - } - - /// Check if a reference is a tag - pub fn is_tag(&self) -> bool { - unsafe { raw::git_reference_is_tag(&*self.raw) == 1 } - } - - /// Get the full name of a reference. - /// - /// Returns `None` if the name is not valid utf-8. - pub fn name(&self) -> Option<&str> { str::from_utf8(self.name_bytes()).ok() } - - /// Get the full name of a reference. - pub fn name_bytes(&self) -> &[u8] { - unsafe { ::opt_bytes(self, raw::git_reference_name(&*self.raw)).unwrap() } - } - - /// Get the full shorthand of a reference. - /// - /// This will transform the reference name into a name "human-readable" - /// version. If no shortname is appropriate, it will return the full name. 
- /// - /// Returns `None` if the shorthand is not valid utf-8. - pub fn shorthand(&self) -> Option<&str> { - str::from_utf8(self.shorthand_bytes()).ok() - } - - /// Get the full shorthand of a reference. - pub fn shorthand_bytes(&self) -> &[u8] { - unsafe { - ::opt_bytes(self, raw::git_reference_shorthand(&*self.raw)).unwrap() - } - } - - /// Get the OID pointed to by a direct reference. - /// - /// Only available if the reference is direct (i.e. an object id reference, - /// not a symbolic one). - pub fn target(&self) -> Option { - unsafe { - Binding::from_raw_opt(raw::git_reference_target(&*self.raw)) - } - } - - /// Return the peeled OID target of this reference. - /// - /// This peeled OID only applies to direct references that point to a hard - /// Tag object: it is the result of peeling such Tag. - pub fn target_peel(&self) -> Option { - unsafe { - Binding::from_raw_opt(raw::git_reference_target_peel(&*self.raw)) - } - } - - /// Get full name to the reference pointed to by a symbolic reference. - /// - /// May return `None` if the reference is either not symbolic or not a - /// valid utf-8 string. - pub fn symbolic_target(&self) -> Option<&str> { - self.symbolic_target_bytes().and_then(|s| str::from_utf8(s).ok()) - } - - /// Get full name to the reference pointed to by a symbolic reference. - /// - /// Only available if the reference is symbolic. - pub fn symbolic_target_bytes(&self) -> Option<&[u8]> { - unsafe { ::opt_bytes(self, raw::git_reference_symbolic_target(&*self.raw)) } - } - - /// Resolve a symbolic reference to a direct reference. - /// - /// This method iteratively peels a symbolic reference until it resolves to - /// a direct reference to an OID. - /// - /// If a direct reference is passed as an argument, a copy of that - /// reference is returned. 
- pub fn resolve(&self) -> Result, Error> { - let mut raw = 0 as *mut raw::git_reference; - unsafe { - try_call!(raw::git_reference_resolve(&mut raw, &*self.raw)); - Ok(Binding::from_raw(raw)) - } - } - - /// Peel a reference to an object - /// - /// This method recursively peels the reference until it reaches - /// an object of the specified type. - pub fn peel(&self, kind: ObjectType) -> Result, Error> { - let mut raw = 0 as *mut raw::git_object; - unsafe { - try_call!(raw::git_reference_peel(&mut raw, self.raw, kind)); - Ok(Binding::from_raw(raw)) - } - } - - /// Rename an existing reference. - /// - /// This method works for both direct and symbolic references. - /// - /// If the force flag is not enabled, and there's already a reference with - /// the given name, the renaming will fail. - pub fn rename(&mut self, new_name: &str, force: bool, - msg: &str) -> Result, Error> { - let mut raw = 0 as *mut raw::git_reference; - let new_name = try!(CString::new(new_name)); - let msg = try!(CString::new(msg)); - unsafe { - try_call!(raw::git_reference_rename(&mut raw, self.raw, new_name, - force, msg)); - Ok(Binding::from_raw(raw)) - } - } - - /// Conditionally create a new reference with the same name as the given - /// reference but a different OID target. The reference must be a direct - /// reference, otherwise this will fail. - /// - /// The new reference will be written to disk, overwriting the given - /// reference. 
- pub fn set_target(&mut self, id: Oid, reflog_msg: &str) - -> Result, Error> { - let mut raw = 0 as *mut raw::git_reference; - let msg = try!(CString::new(reflog_msg)); - unsafe { - try_call!(raw::git_reference_set_target(&mut raw, self.raw, - id.raw(), msg)); - Ok(Binding::from_raw(raw)) - } - } - -} - -impl<'repo> PartialOrd for Reference<'repo> { - fn partial_cmp(&self, other: &Reference<'repo>) -> Option { - Some(self.cmp(other)) - } -} - -impl<'repo> Ord for Reference<'repo> { - fn cmp(&self, other: &Reference<'repo>) -> Ordering { - match unsafe { raw::git_reference_cmp(&*self.raw, &*other.raw) } { - 0 => Ordering::Equal, - n if n < 0 => Ordering::Less, - _ => Ordering::Greater, - } - } -} - -impl<'repo> PartialEq for Reference<'repo> { - fn eq(&self, other: &Reference<'repo>) -> bool { - self.cmp(other) == Ordering::Equal - } -} - -impl<'repo> Eq for Reference<'repo> {} - -impl<'repo> Binding for Reference<'repo> { - type Raw = *mut raw::git_reference; - unsafe fn from_raw(raw: *mut raw::git_reference) -> Reference<'repo> { - Reference { raw: raw, _marker: marker::PhantomData } - } - fn raw(&self) -> *mut raw::git_reference { self.raw } -} - -impl<'repo> Drop for Reference<'repo> { - fn drop(&mut self) { - unsafe { raw::git_reference_free(self.raw) } - } -} - -impl<'repo> References<'repo> { - /// Consumes a `References` iterator to create an iterator over just the - /// name of some references. - /// - /// This is more efficient if only the names are desired of references as - /// the references themselves don't have to be allocated and deallocated. - /// - /// The returned iterator will yield strings as opposed to a `Reference`. 
- pub fn names(self) -> ReferenceNames<'repo> { - ReferenceNames { inner: self } - } -} - -impl<'repo> Binding for References<'repo> { - type Raw = *mut raw::git_reference_iterator; - unsafe fn from_raw(raw: *mut raw::git_reference_iterator) - -> References<'repo> { - References { raw: raw, _marker: marker::PhantomData } - } - fn raw(&self) -> *mut raw::git_reference_iterator { self.raw } -} - -impl<'repo> Iterator for References<'repo> { - type Item = Result, Error>; - fn next(&mut self) -> Option, Error>> { - let mut out = 0 as *mut raw::git_reference; - unsafe { - try_call_iter!(raw::git_reference_next(&mut out, self.raw)); - Some(Ok(Binding::from_raw(out))) - } - } -} - -impl<'repo> Drop for References<'repo> { - fn drop(&mut self) { - unsafe { raw::git_reference_iterator_free(self.raw) } - } -} - -impl<'repo> Iterator for ReferenceNames<'repo> { - type Item = Result<&'repo str, Error>; - fn next(&mut self) -> Option> { - let mut out = 0 as *const libc::c_char; - unsafe { - try_call_iter!(raw::git_reference_next_name(&mut out, - self.inner.raw)); - let bytes = ::opt_bytes(self, out).unwrap(); - let s = str::from_utf8(bytes).unwrap(); - Some(Ok(mem::transmute::<&str, &'repo str>(s))) - } - } -} - -#[cfg(test)] -mod tests { - use {Reference, ObjectType}; - - #[test] - fn smoke() { - assert!(Reference::is_valid_name("refs/foo")); - assert!(!Reference::is_valid_name("foo")); - } - - #[test] - fn smoke2() { - let (_td, repo) = ::test::repo_init(); - let mut head = repo.head().unwrap(); - assert!(head.is_branch()); - assert!(!head.is_remote()); - assert!(!head.is_tag()); - assert!(!head.is_note()); - - assert!(head == repo.head().unwrap()); - assert_eq!(head.name(), Some("refs/heads/master")); - - assert!(head == repo.find_reference("refs/heads/master").unwrap()); - assert_eq!(repo.refname_to_id("refs/heads/master").unwrap(), - head.target().unwrap()); - - assert!(head.symbolic_target().is_none()); - assert!(head.target_peel().is_none()); - - 
assert_eq!(head.shorthand(), Some("master")); - assert!(head.resolve().unwrap() == head); - - let mut tag1 = repo.reference("refs/tags/tag1", - head.target().unwrap(), - false, "test").unwrap(); - assert!(tag1.is_tag()); - - let peeled_commit = tag1.peel(ObjectType::Commit).unwrap(); - assert_eq!(ObjectType::Commit, peeled_commit.kind().unwrap()); - assert_eq!(tag1.target().unwrap(), peeled_commit.id()); - - tag1.delete().unwrap(); - - let mut sym1 = repo.reference_symbolic("refs/tags/tag1", - "refs/heads/master", false, - "test").unwrap(); - sym1.delete().unwrap(); - - { - assert!(repo.references().unwrap().count() == 1); - assert!(repo.references().unwrap().next().unwrap().unwrap() == head); - let mut names = repo.references().unwrap().names(); - assert_eq!(names.next().unwrap().unwrap(), "refs/heads/master"); - assert!(names.next().is_none()); - assert!(repo.references_glob("foo").unwrap().count() == 0); - assert!(repo.references_glob("refs/heads/*").unwrap().count() == 1); - } - - let mut head = head.rename("refs/foo", true, "test").unwrap(); - head.delete().unwrap(); - - } -} diff -Nru cargo-0.17.0/vendor/git2-0.6.3/src/reflog.rs cargo-0.19.0/vendor/git2-0.6.3/src/reflog.rs --- cargo-0.17.0/vendor/git2-0.6.3/src/reflog.rs 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/git2-0.6.3/src/reflog.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,167 +0,0 @@ -use std::ops::Range; -use std::marker; -use std::str; -use libc::size_t; - -use {raw, signature, Oid, Error, Signature}; -use util::Binding; - -/// A reference log of a git repository. -pub struct Reflog { - raw: *mut raw::git_reflog, -} - -/// An entry inside the reflog of a repository -pub struct ReflogEntry<'reflog> { - raw: *const raw::git_reflog_entry, - _marker: marker::PhantomData<&'reflog Reflog>, -} - -/// An iterator over the entries inside of a reflog. -pub struct ReflogIter<'reflog> { - range: Range, - reflog: &'reflog Reflog, -} - -impl Reflog { - /// Add a new entry to the in-memory reflog. 
- pub fn append(&mut self, new_oid: Oid, committer: &Signature, - msg: Option<&str>) -> Result<(), Error> { - let msg = try!(::opt_cstr(msg)); - unsafe { - try_call!(raw::git_reflog_append(self.raw, new_oid.raw(), - committer.raw(), msg)); - } - Ok(()) - } - - /// Remove an entry from the reflog by its index - /// - /// To ensure there's no gap in the log history, set rewrite_previous_entry - /// param value to `true`. When deleting entry n, member old_oid of entry - /// n-1 (if any) will be updated with the value of member new_oid of entry - /// n+1. - pub fn remove(&mut self, i: usize, rewrite_previous_entry: bool) - -> Result<(), Error> { - unsafe { - try_call!(raw::git_reflog_drop(self.raw, i as size_t, - rewrite_previous_entry)); - } - Ok(()) - } - - /// Lookup an entry by its index - /// - /// Requesting the reflog entry with an index of 0 (zero) will return the - /// most recently created entry. - pub fn get(&self, i: usize) -> Option { - unsafe { - let ptr = raw::git_reflog_entry_byindex(self.raw, i as size_t); - Binding::from_raw_opt(ptr) - } - } - - /// Get the number of log entries in a reflog - pub fn len(&self) -> usize { - unsafe { raw::git_reflog_entrycount(self.raw) as usize } - } - - /// Get an iterator to all entries inside of this reflog - pub fn iter(&self) -> ReflogIter { - ReflogIter { range: 0..self.len(), reflog: self } - } - - /// Write an existing in-memory reflog object back to disk using an atomic - /// file lock. 
- pub fn write(&mut self) -> Result<(), Error> { - unsafe { try_call!(raw::git_reflog_write(self.raw)); } - Ok(()) - } -} - -impl Binding for Reflog { - type Raw = *mut raw::git_reflog; - - unsafe fn from_raw(raw: *mut raw::git_reflog) -> Reflog { - Reflog { raw: raw } - } - fn raw(&self) -> *mut raw::git_reflog { self.raw } -} - -impl Drop for Reflog { - fn drop(&mut self) { - unsafe { raw::git_reflog_free(self.raw) } - } -} - -impl<'reflog> ReflogEntry<'reflog> { - /// Get the committer of this entry - pub fn committer(&self) -> Signature { - unsafe { - let ptr = raw::git_reflog_entry_committer(self.raw); - signature::from_raw_const(self, ptr) - } - } - - /// Get the new oid - pub fn id_new(&self) -> Oid { - unsafe { Binding::from_raw(raw::git_reflog_entry_id_new(self.raw)) } - } - - /// Get the old oid - pub fn id_old(&self) -> Oid { - unsafe { Binding::from_raw(raw::git_reflog_entry_id_new(self.raw)) } - } - - /// Get the log message, returning `None` on invalid UTF-8. - pub fn message(&self) -> Option<&str> { - self.message_bytes().and_then(|s| str::from_utf8(s).ok()) - } - - /// Get the log message as a byte array. 
- pub fn message_bytes(&self) -> Option<&[u8]> { - unsafe { - ::opt_bytes(self, raw::git_reflog_entry_message(self.raw)) - } - } -} - -impl<'reflog> Binding for ReflogEntry<'reflog> { - type Raw = *const raw::git_reflog_entry; - - unsafe fn from_raw(raw: *const raw::git_reflog_entry) -> ReflogEntry<'reflog> { - ReflogEntry { raw: raw, _marker: marker::PhantomData } - } - fn raw(&self) -> *const raw::git_reflog_entry { self.raw } -} - -impl<'reflog> Iterator for ReflogIter<'reflog> { - type Item = ReflogEntry<'reflog>; - fn next(&mut self) -> Option> { - self.range.next().and_then(|i| self.reflog.get(i)) - } - fn size_hint(&self) -> (usize, Option) { self.range.size_hint() } -} -impl<'reflog> DoubleEndedIterator for ReflogIter<'reflog> { - fn next_back(&mut self) -> Option> { - self.range.next_back().and_then(|i| self.reflog.get(i)) - } -} -impl<'reflog> ExactSizeIterator for ReflogIter<'reflog> {} - -#[cfg(test)] -mod tests { - #[test] - fn smoke() { - let (_td, repo) = ::test::repo_init(); - let mut reflog = repo.reflog("HEAD").unwrap(); - assert_eq!(reflog.iter().len(), 1); - reflog.write().unwrap(); - - let entry = reflog.iter().next().unwrap(); - assert!(entry.message().is_some()); - - repo.reflog_rename("HEAD", "refs/heads/foo").unwrap(); - repo.reflog_delete("refs/heads/foo").unwrap(); - } -} diff -Nru cargo-0.17.0/vendor/git2-0.6.3/src/refspec.rs cargo-0.19.0/vendor/git2-0.6.3/src/refspec.rs --- cargo-0.17.0/vendor/git2-0.6.3/src/refspec.rs 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/git2-0.6.3/src/refspec.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,89 +0,0 @@ -use std::ffi::CString; -use std::marker; -use std::str; - -use {raw, Direction}; -use util::Binding; - -/// A structure to represent a git [refspec][1]. -/// -/// Refspecs are currently mainly accessed/created through a `Remote`. 
-/// -/// [1]: http://git-scm.com/book/en/Git-Internals-The-Refspec -pub struct Refspec<'remote> { - raw: *const raw::git_refspec, - _marker: marker::PhantomData<&'remote raw::git_remote>, -} - -impl<'remote> Refspec<'remote> { - /// Get the refspec's direction. - pub fn direction(&self) -> Direction { - match unsafe { raw::git_refspec_direction(self.raw) } { - raw::GIT_DIRECTION_FETCH => Direction::Fetch, - raw::GIT_DIRECTION_PUSH => Direction::Push, - n => panic!("unknown refspec direction: {}", n), - } - } - - /// Get the destination specifier. - /// - /// If the destination is not utf-8, None is returned. - pub fn dst(&self) -> Option<&str> { - str::from_utf8(self.dst_bytes()).ok() - } - - /// Get the destination specifier, in bytes. - pub fn dst_bytes(&self) -> &[u8] { - unsafe { ::opt_bytes(self, raw::git_refspec_dst(self.raw)).unwrap() } - } - - /// Check if a refspec's destination descriptor matches a reference - pub fn dst_matches(&self, refname: &str) -> bool { - let refname = CString::new(refname).unwrap(); - unsafe { raw::git_refspec_dst_matches(self.raw, refname.as_ptr()) == 1 } - } - - /// Get the source specifier. - /// - /// If the source is not utf-8, None is returned. - pub fn src(&self) -> Option<&str> { - str::from_utf8(self.src_bytes()).ok() - } - - /// Get the source specifier, in bytes. - pub fn src_bytes(&self) -> &[u8] { - unsafe { ::opt_bytes(self, raw::git_refspec_src(self.raw)).unwrap() } - } - - /// Check if a refspec's source descriptor matches a reference - pub fn src_matches(&self, refname: &str) -> bool { - let refname = CString::new(refname).unwrap(); - unsafe { raw::git_refspec_src_matches(self.raw, refname.as_ptr()) == 1 } - } - - /// Get the force update setting. - pub fn is_force(&self) -> bool { - unsafe { raw::git_refspec_force(self.raw) == 1 } - } - - /// Get the refspec's string. - /// - /// Returns None if the string is not valid utf8. 
- pub fn str(&self) -> Option<&str> { - str::from_utf8(self.bytes()).ok() - } - - /// Get the refspec's string as a byte array - pub fn bytes(&self) -> &[u8] { - unsafe { ::opt_bytes(self, raw::git_refspec_string(self.raw)).unwrap() } - } -} - -impl<'remote> Binding for Refspec<'remote> { - type Raw = *const raw::git_refspec; - - unsafe fn from_raw(raw: *const raw::git_refspec) -> Refspec<'remote> { - Refspec { raw: raw, _marker: marker::PhantomData } - } - fn raw(&self) -> *const raw::git_refspec { self.raw } -} diff -Nru cargo-0.17.0/vendor/git2-0.6.3/src/remote_callbacks.rs cargo-0.19.0/vendor/git2-0.6.3/src/remote_callbacks.rs --- cargo-0.17.0/vendor/git2-0.6.3/src/remote_callbacks.rs 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/git2-0.6.3/src/remote_callbacks.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,334 +0,0 @@ -use std::ffi::CStr; -use std::marker; -use std::mem; -use std::slice; -use std::str; -use libc::{c_void, c_int, c_char, c_uint}; - -use {raw, panic, Error, Cred, CredentialType, Oid}; -use cert::Cert; -use util::Binding; - -/// A structure to contain the callbacks which are invoked when a repository is -/// being updated or downloaded. -/// -/// These callbacks are used to manage facilities such as authentication, -/// transfer progress, etc. -pub struct RemoteCallbacks<'a> { - progress: Option>>, - credentials: Option>>, - sideband_progress: Option>>, - update_tips: Option>>, - certificate_check: Option>>, -} - -/// Struct representing the progress by an in-flight transfer. -pub struct Progress<'a> { - raw: ProgressState, - _marker: marker::PhantomData<&'a raw::git_transfer_progress>, -} - -enum ProgressState { - Borrowed(*const raw::git_transfer_progress), - Owned(raw::git_transfer_progress), -} - -/// Callback used to acquire credentials for when a remote is fetched. -/// -/// * `url` - the resource for which the credentials are required. 
-/// * `username_from_url` - the username that was embedded in the url, or `None` -/// if it was not included. -/// * `allowed_types` - a bitmask stating which cred types are ok to return. -pub type Credentials<'a> = FnMut(&str, Option<&str>, CredentialType) - -> Result + 'a; - -/// Callback to be invoked while a transfer is in progress. -/// -/// This callback will be periodically called with updates to the progress of -/// the transfer so far. The return value indicates whether the transfer should -/// continue. A return value of `false` will cancel the transfer. -/// -/// * `progress` - the progress being made so far. -pub type TransferProgress<'a> = FnMut(Progress) -> bool + 'a; - -/// Callback for receiving messages delivered by the transport. -/// -/// The return value indicates whether the network operation should continue. -pub type TransportMessage<'a> = FnMut(&[u8]) -> bool + 'a; - -/// Callback for whenever a reference is updated locally. -pub type UpdateTips<'a> = FnMut(&str, Oid, Oid) -> bool + 'a; - -/// Callback for a custom certificate check. -/// -/// The first argument is the certificate receved on the connection. -/// Certificates are typically either an SSH or X509 certificate. -/// -/// The second argument is the hostname for the connection is passed as the last -/// argument. -pub type CertificateCheck<'a> = FnMut(&Cert, &str) -> bool + 'a; - -impl<'a> RemoteCallbacks<'a> { - /// Creates a new set of empty callbacks - pub fn new() -> RemoteCallbacks<'a> { - RemoteCallbacks { - credentials: None, - progress: None, - sideband_progress: None, - update_tips: None, - certificate_check: None, - } - } - - /// The callback through which to fetch credentials if required. - pub fn credentials(&mut self, cb: F) -> &mut RemoteCallbacks<'a> - where F: FnMut(&str, Option<&str>, CredentialType) - -> Result + 'a - { - self.credentials = Some(Box::new(cb) as Box>); - self - } - - /// The callback through which progress is monitored. 
- pub fn transfer_progress(&mut self, cb: F) -> &mut RemoteCallbacks<'a> - where F: FnMut(Progress) -> bool + 'a { - self.progress = Some(Box::new(cb) as Box>); - self - } - - /// Textual progress from the remote. - /// - /// Text sent over the progress side-band will be passed to this function - /// (this is the 'counting objects' output. - pub fn sideband_progress(&mut self, cb: F) -> &mut RemoteCallbacks<'a> - where F: FnMut(&[u8]) -> bool + 'a { - self.sideband_progress = Some(Box::new(cb) as Box>); - self - } - - /// Each time a reference is updated locally, the callback will be called - /// with information about it. - pub fn update_tips(&mut self, cb: F) -> &mut RemoteCallbacks<'a> - where F: FnMut(&str, Oid, Oid) -> bool + 'a { - self.update_tips = Some(Box::new(cb) as Box>); - self - } - - /// If certificate verification fails, then this callback will be invoked to - /// let the caller make the final decision of whether to allow the - /// connection to proceed. - pub fn certificate_check(&mut self, cb: F) -> &mut RemoteCallbacks<'a> - where F: FnMut(&Cert, &str) -> bool + 'a - { - self.certificate_check = Some(Box::new(cb) as Box>); - self - } -} - -impl<'a> Binding for RemoteCallbacks<'a> { - type Raw = raw::git_remote_callbacks; - unsafe fn from_raw(_raw: raw::git_remote_callbacks) -> RemoteCallbacks<'a> { - panic!("unimplemented"); - } - - fn raw(&self) -> raw::git_remote_callbacks { - unsafe { - let mut callbacks: raw::git_remote_callbacks = mem::zeroed(); - assert_eq!(raw::git_remote_init_callbacks(&mut callbacks, - raw::GIT_REMOTE_CALLBACKS_VERSION), 0); - if self.progress.is_some() { - let f: raw::git_transfer_progress_cb = transfer_progress_cb; - callbacks.transfer_progress = Some(f); - } - if self.credentials.is_some() { - let f: raw::git_cred_acquire_cb = credentials_cb; - callbacks.credentials = Some(f); - } - if self.sideband_progress.is_some() { - let f: raw::git_transport_message_cb = sideband_progress_cb; - callbacks.sideband_progress = 
Some(f); - } - if self.certificate_check.is_some() { - let f: raw::git_transport_certificate_check_cb = - certificate_check_cb; - callbacks.certificate_check = Some(f); - } - if self.update_tips.is_some() { - let f: extern fn(*const c_char, *const raw::git_oid, - *const raw::git_oid, *mut c_void) -> c_int - = update_tips_cb; - callbacks.update_tips = Some(f); - } - callbacks.payload = self as *const _ as *mut _; - return callbacks; - } - } -} - -impl<'a> Progress<'a> { - /// Number of objects in the packfile being downloaded - pub fn total_objects(&self) -> usize { - unsafe { (*self.raw()).total_objects as usize } - } - /// Received objects that have been hashed - pub fn indexed_objects(&self) -> usize { - unsafe { (*self.raw()).indexed_objects as usize } - } - /// Objects which have been downloaded - pub fn received_objects(&self) -> usize { - unsafe { (*self.raw()).received_objects as usize } - } - /// Locally-available objects that have been injected in order to fix a thin - /// pack. - pub fn local_objects(&self) -> usize { - unsafe { (*self.raw()).local_objects as usize } - } - /// Number of deltas in the packfile being downloaded - pub fn total_deltas(&self) -> usize { - unsafe { (*self.raw()).total_deltas as usize } - } - /// Received deltas that have been hashed. - pub fn indexed_deltas(&self) -> usize { - unsafe { (*self.raw()).indexed_deltas as usize } - } - /// Size of the packfile received up to now - pub fn received_bytes(&self) -> usize { - unsafe { (*self.raw()).received_bytes as usize } - } - - /// Convert this to an owned version of `Progress`. 
- pub fn to_owned(&self) -> Progress<'static> { - Progress { - raw: ProgressState::Owned(unsafe { *self.raw() }), - _marker: marker::PhantomData, - } - } -} - -impl<'a> Binding for Progress<'a> { - type Raw = *const raw::git_transfer_progress; - unsafe fn from_raw(raw: *const raw::git_transfer_progress) - -> Progress<'a> { - Progress { - raw: ProgressState::Borrowed(raw), - _marker: marker::PhantomData, - } - } - - fn raw(&self) -> *const raw::git_transfer_progress { - match self.raw { - ProgressState::Borrowed(raw) => raw, - ProgressState::Owned(ref raw) => raw as *const _, - } - } -} - -extern fn credentials_cb(ret: *mut *mut raw::git_cred, - url: *const c_char, - username_from_url: *const c_char, - allowed_types: c_uint, - payload: *mut c_void) -> c_int { - unsafe { - let ok = panic::wrap(|| { - let payload = &mut *(payload as *mut RemoteCallbacks); - let callback = try!(payload.credentials.as_mut() - .ok_or(raw::GIT_PASSTHROUGH as c_int)); - *ret = 0 as *mut raw::git_cred; - let url = try!(str::from_utf8(CStr::from_ptr(url).to_bytes()) - .map_err(|_| raw::GIT_PASSTHROUGH as c_int)); - let username_from_url = match ::opt_bytes(&url, username_from_url) { - Some(username) => { - Some(try!(str::from_utf8(username) - .map_err(|_| raw::GIT_PASSTHROUGH as c_int))) - } - None => None, - }; - - let cred_type = CredentialType::from_bits_truncate(allowed_types as u32); - - callback(url, username_from_url, cred_type).map_err(|e| { - e.raw_code() as c_int - }) - }); - match ok { - Some(Ok(cred)) => { - // Turns out it's a memory safety issue if we pass through any - // and all credentials into libgit2 - if allowed_types & (cred.credtype() as c_uint) != 0 { - *ret = cred.unwrap(); - 0 - } else { - raw::GIT_PASSTHROUGH as c_int - } - } - Some(Err(e)) => e, - None => -1, - } - } -} - -extern fn transfer_progress_cb(stats: *const raw::git_transfer_progress, - payload: *mut c_void) -> c_int { - let ok = panic::wrap(|| unsafe { - let payload = &mut *(payload as *mut 
RemoteCallbacks); - let callback = match payload.progress { - Some(ref mut c) => c, - None => return true, - }; - let progress = Binding::from_raw(stats); - callback(progress) - }); - if ok == Some(true) {0} else {-1} -} - -extern fn sideband_progress_cb(str: *const c_char, - len: c_int, - payload: *mut c_void) -> c_int { - let ok = panic::wrap(|| unsafe { - let payload = &mut *(payload as *mut RemoteCallbacks); - let callback = match payload.sideband_progress { - Some(ref mut c) => c, - None => return true, - }; - let buf = slice::from_raw_parts(str as *const u8, len as usize); - callback(buf) - }); - if ok == Some(true) {0} else {-1} -} - -extern fn update_tips_cb(refname: *const c_char, - a: *const raw::git_oid, - b: *const raw::git_oid, - data: *mut c_void) -> c_int { - let ok = panic::wrap(|| unsafe { - let payload = &mut *(data as *mut RemoteCallbacks); - let callback = match payload.update_tips { - Some(ref mut c) => c, - None => return true, - }; - let refname = str::from_utf8(CStr::from_ptr(refname).to_bytes()) - .unwrap(); - let a = Binding::from_raw(a); - let b = Binding::from_raw(b); - callback(refname, a, b) - }); - if ok == Some(true) {0} else {-1} -} - -extern fn certificate_check_cb(cert: *mut raw::git_cert, - _valid: c_int, - hostname: *const c_char, - data: *mut c_void) -> c_int { - let ok = panic::wrap(|| unsafe { - let payload = &mut *(data as *mut RemoteCallbacks); - let callback = match payload.certificate_check { - Some(ref mut c) => c, - None => return true, - }; - let cert = Binding::from_raw(cert); - let hostname = str::from_utf8(CStr::from_ptr(hostname).to_bytes()) - .unwrap(); - callback(&cert, hostname) - }); - if ok == Some(true) {0} else {-1} -} diff -Nru cargo-0.17.0/vendor/git2-0.6.3/src/remote.rs cargo-0.19.0/vendor/git2-0.6.3/src/remote.rs --- cargo-0.17.0/vendor/git2-0.6.3/src/remote.rs 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/git2-0.6.3/src/remote.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,587 +0,0 @@ -use 
std::ffi::CString; -use std::ops::Range; -use std::marker; -use std::mem; -use std::slice; -use std::str; -use libc; - -use {raw, Direction, Error, Refspec, Oid, FetchPrune, ProxyOptions}; -use {RemoteCallbacks, Progress, Repository, AutotagOption}; -use util::Binding; - -/// A structure representing a [remote][1] of a git repository. -/// -/// [1]: http://git-scm.com/book/en/Git-Basics-Working-with-Remotes -/// -/// The lifetime is the lifetime of the repository that it is attached to. The -/// remote is used to manage fetches and pushes as well as refspecs. -pub struct Remote<'repo> { - raw: *mut raw::git_remote, - _marker: marker::PhantomData<&'repo Repository>, -} - -/// An iterator over the refspecs that a remote contains. -pub struct Refspecs<'remote> { - range: Range, - remote: &'remote Remote<'remote>, -} - -/// Description of a reference advertised bya remote server, given out on calls -/// to `list`. -pub struct RemoteHead<'remote> { - raw: *const raw::git_remote_head, - _marker: marker::PhantomData<&'remote str>, -} - -/// Options which can be specified to various fetch operations. -pub struct FetchOptions<'cb> { - callbacks: Option>, - proxy: Option>, - prune: FetchPrune, - update_fetchhead: bool, - download_tags: AutotagOption, -} - -/// Options to control the behavior of a git push. -pub struct PushOptions<'cb> { - callbacks: Option>, - proxy: Option>, - pb_parallelism: u32, -} - -impl<'repo> Remote<'repo> { - /// Ensure the remote name is well-formed. - pub fn is_valid_name(remote_name: &str) -> bool { - ::init(); - let remote_name = CString::new(remote_name).unwrap(); - unsafe { raw::git_remote_is_valid_name(remote_name.as_ptr()) == 1 } - } - - /// Get the remote's name. - /// - /// Returns `None` if this remote has not yet been named or if the name is - /// not valid utf-8 - pub fn name(&self) -> Option<&str> { - self.name_bytes().and_then(|s| str::from_utf8(s).ok()) - } - - /// Get the remote's name, in bytes. 
- /// - /// Returns `None` if this remote has not yet been named - pub fn name_bytes(&self) -> Option<&[u8]> { - unsafe { ::opt_bytes(self, raw::git_remote_name(&*self.raw)) } - } - - /// Get the remote's url. - /// - /// Returns `None` if the url is not valid utf-8 - pub fn url(&self) -> Option<&str> { - str::from_utf8(self.url_bytes()).ok() - } - - /// Get the remote's url as a byte array. - pub fn url_bytes(&self) -> &[u8] { - unsafe { ::opt_bytes(self, raw::git_remote_url(&*self.raw)).unwrap() } - } - - /// Get the remote's pushurl. - /// - /// Returns `None` if the pushurl is not valid utf-8 - pub fn pushurl(&self) -> Option<&str> { - self.pushurl_bytes().and_then(|s| str::from_utf8(s).ok()) - } - - /// Get the remote's pushurl as a byte array. - pub fn pushurl_bytes(&self) -> Option<&[u8]> { - unsafe { ::opt_bytes(self, raw::git_remote_pushurl(&*self.raw)) } - } - - /// Open a connection to a remote. - pub fn connect(&mut self, dir: Direction) -> Result<(), Error> { - // TODO: can callbacks be exposed safely? - unsafe { - try_call!(raw::git_remote_connect(self.raw, dir, - 0 as *const _, - 0 as *const _, - 0 as *const _)); - } - Ok(()) - } - - /// Check whether the remote is connected - pub fn connected(&mut self) -> bool { - unsafe { raw::git_remote_connected(self.raw) == 1 } - } - - /// Disconnect from the remote - pub fn disconnect(&mut self) { - unsafe { raw::git_remote_disconnect(self.raw) } - } - - /// Download and index the packfile - /// - /// Connect to the remote if it hasn't been done yet, negotiate with the - /// remote git which objects are missing, download and index the packfile. - /// - /// The .idx file will be created and both it and the packfile with be - /// renamed to their final name. - /// - /// The `specs` argument is a list of refspecs to use for this negotiation - /// and download. Use an empty array to use the base refspecs. 
- pub fn download(&mut self, specs: &[&str], opts: Option<&mut FetchOptions>) - -> Result<(), Error> { - let (_a, _b, arr) = try!(::util::iter2cstrs(specs.iter())); - let raw = opts.map(|o| o.raw()); - unsafe { - try_call!(raw::git_remote_download(self.raw, &arr, raw.as_ref())); - } - Ok(()) - } - - /// Get the number of refspecs for a remote - pub fn refspecs<'a>(&'a self) -> Refspecs<'a> { - let cnt = unsafe { raw::git_remote_refspec_count(&*self.raw) as usize }; - Refspecs { range: 0..cnt, remote: self } - } - - /// Get the `nth` refspec from this remote. - /// - /// The `refspecs` iterator can be used to iterate over all refspecs. - pub fn get_refspec(&self, i: usize) -> Option> { - unsafe { - let ptr = raw::git_remote_get_refspec(&*self.raw, - i as libc::size_t); - Binding::from_raw_opt(ptr) - } - } - - /// Download new data and update tips - /// - /// Convenience function to connect to a remote, download the data, - /// disconnect and update the remote-tracking branches. - pub fn fetch(&mut self, - refspecs: &[&str], - opts: Option<&mut FetchOptions>, - reflog_msg: Option<&str>) -> Result<(), Error> { - let (_a, _b, arr) = try!(::util::iter2cstrs(refspecs.iter())); - let msg = try!(::opt_cstr(reflog_msg)); - let raw = opts.map(|o| o.raw()); - unsafe { - try_call!(raw::git_remote_fetch(self.raw, &arr, raw.as_ref(), msg)); - } - Ok(()) - } - - /// Update the tips to the new state - pub fn update_tips(&mut self, - callbacks: Option<&mut RemoteCallbacks>, - update_fetchhead: bool, - download_tags: AutotagOption, - msg: Option<&str>) -> Result<(), Error> { - let msg = try!(::opt_cstr(msg)); - let cbs = callbacks.map(|cb| cb.raw()); - unsafe { - try_call!(raw::git_remote_update_tips(self.raw, cbs.as_ref(), - update_fetchhead, - download_tags, msg)); - } - Ok(()) - } - - /// Perform a push - /// - /// Perform all the steps for a push. If no refspecs are passed then the - /// configured refspecs will be used. 
- pub fn push(&mut self, - refspecs: &[&str], - opts: Option<&mut PushOptions>) -> Result<(), Error> { - let (_a, _b, arr) = try!(::util::iter2cstrs(refspecs.iter())); - let raw = opts.map(|o| o.raw()); - unsafe { - try_call!(raw::git_remote_push(self.raw, &arr, raw.as_ref())); - } - Ok(()) - } - - /// Get the statistics structure that is filled in by the fetch operation. - pub fn stats(&self) -> Progress { - unsafe { - Binding::from_raw(raw::git_remote_stats(self.raw)) - } - } - - /// Get the remote repository's reference advertisement list. - /// - /// Get the list of references with which the server responds to a new - /// connection. - /// - /// The remote (or more exactly its transport) must have connected to the - /// remote repository. This list is available as soon as the connection to - /// the remote is initiated and it remains available after disconnecting. - pub fn list(&self) -> Result<&[RemoteHead], Error> { - let mut size = 0; - let mut base = 0 as *mut _; - unsafe { - try_call!(raw::git_remote_ls(&mut base, &mut size, self.raw)); - assert_eq!(mem::size_of::(), - mem::size_of::<*const raw::git_remote_head>()); - let slice = slice::from_raw_parts(base as *const _, size as usize); - Ok(mem::transmute::<&[*const raw::git_remote_head], - &[RemoteHead]>(slice)) - } - } -} - -impl<'repo> Clone for Remote<'repo> { - fn clone(&self) -> Remote<'repo> { - let mut ret = 0 as *mut raw::git_remote; - let rc = unsafe { call!(raw::git_remote_dup(&mut ret, self.raw)) }; - assert_eq!(rc, 0); - Remote { - raw: ret, - _marker: marker::PhantomData, - } - } -} - -impl<'repo> Binding for Remote<'repo> { - type Raw = *mut raw::git_remote; - - unsafe fn from_raw(raw: *mut raw::git_remote) -> Remote<'repo> { - Remote { - raw: raw, - _marker: marker::PhantomData, - } - } - fn raw(&self) -> *mut raw::git_remote { self.raw } -} - -impl<'repo> Drop for Remote<'repo> { - fn drop(&mut self) { - unsafe { raw::git_remote_free(self.raw) } - } -} - -impl<'repo> Iterator for 
Refspecs<'repo> { - type Item = Refspec<'repo>; - fn next(&mut self) -> Option> { - self.range.next().and_then(|i| self.remote.get_refspec(i)) - } - fn size_hint(&self) -> (usize, Option) { self.range.size_hint() } -} -impl<'repo> DoubleEndedIterator for Refspecs<'repo> { - fn next_back(&mut self) -> Option> { - self.range.next_back().and_then(|i| self.remote.get_refspec(i)) - } -} -impl<'repo> ExactSizeIterator for Refspecs<'repo> {} - -#[allow(missing_docs)] // not documented in libgit2 :( -impl<'remote> RemoteHead<'remote> { - /// Flag if this is available locally. - pub fn is_local(&self) -> bool { - unsafe { (*self.raw).local != 0 } - } - - pub fn oid(&self) -> Oid { - unsafe { Binding::from_raw(&(*self.raw).oid as *const _) } - } - pub fn loid(&self) -> Oid { - unsafe { Binding::from_raw(&(*self.raw).loid as *const _) } - } - - pub fn name(&self) -> &str { - let b = unsafe { ::opt_bytes(self, (*self.raw).name).unwrap() }; - str::from_utf8(b).unwrap() - } - - pub fn symref_target(&self) -> Option<&str> { - let b = unsafe { ::opt_bytes(self, (*self.raw).symref_target) }; - b.map(|b| str::from_utf8(b).unwrap()) - } -} - -impl<'cb> FetchOptions<'cb> { - /// Creates a new blank set of fetch options - pub fn new() -> FetchOptions<'cb> { - FetchOptions { - callbacks: None, - proxy: None, - prune: FetchPrune::Unspecified, - update_fetchhead: true, - download_tags: AutotagOption::Unspecified, - } - } - - /// Set the callbacks to use for the fetch operation. - pub fn remote_callbacks(&mut self, cbs: RemoteCallbacks<'cb>) -> &mut Self { - self.callbacks = Some(cbs); - self - } - - /// Set the proxy options to use for the fetch operation. - pub fn proxy_options(&mut self, opts: ProxyOptions<'cb>) -> &mut Self { - self.proxy = Some(opts); - self - } - - /// Set whether to perform a prune after the fetch. - pub fn prune(&mut self, prune: FetchPrune) -> &mut Self { - self.prune = prune; - self - } - - /// Set whether to write the results to FETCH_HEAD. 
- /// - /// Defaults to `true`. - pub fn update_fetchhead(&mut self, update: bool) -> &mut Self { - self.update_fetchhead = update; - self - } - - /// Set how to behave regarding tags on the remote, such as auto-downloading - /// tags for objects we're downloading or downloading all of them. - /// - /// The default is to auto-follow tags. - pub fn download_tags(&mut self, opt: AutotagOption) -> &mut Self { - self.download_tags = opt; - self - } -} - -impl<'cb> Binding for FetchOptions<'cb> { - type Raw = raw::git_fetch_options; - - unsafe fn from_raw(_raw: raw::git_fetch_options) -> FetchOptions<'cb> { - panic!("unimplemented"); - } - fn raw(&self) -> raw::git_fetch_options { - raw::git_fetch_options { - version: 1, - callbacks: self.callbacks.as_ref().map(|m| m.raw()) - .unwrap_or_else(|| RemoteCallbacks::new().raw()), - proxy_opts: self.proxy.as_ref().map(|m| m.raw()) - .unwrap_or_else(|| ProxyOptions::new().raw()), - prune: ::call::convert(&self.prune), - update_fetchhead: ::call::convert(&self.update_fetchhead), - download_tags: ::call::convert(&self.download_tags), - // TODO: expose this as a builder option - custom_headers: raw::git_strarray { - count: 0, - strings: 0 as *mut _, - }, - } - } -} - -impl<'cb> PushOptions<'cb> { - /// Creates a new blank set of push options - pub fn new() -> PushOptions<'cb> { - PushOptions { - callbacks: None, - proxy: None, - pb_parallelism: 1, - } - } - - /// Set the callbacks to use for the fetch operation. - pub fn remote_callbacks(&mut self, cbs: RemoteCallbacks<'cb>) -> &mut Self { - self.callbacks = Some(cbs); - self - } - - /// Set the proxy options to use for the fetch operation. - pub fn proxy_options(&mut self, opts: ProxyOptions<'cb>) -> &mut Self { - self.proxy = Some(opts); - self - } - - /// If the transport being used to push to the remote requires the creation - /// of a pack file, this controls the number of worker threads used by the - /// packbuilder when creating that pack file to be sent to the remote. 
- /// - /// if set to 0 the packbuilder will auto-detect the number of threads to - /// create, and the default value is 1. - pub fn packbuilder_parallelism(&mut self, parallel: u32) -> &mut Self { - self.pb_parallelism = parallel; - self - } -} - -impl<'cb> Binding for PushOptions<'cb> { - type Raw = raw::git_push_options; - - unsafe fn from_raw(_raw: raw::git_push_options) -> PushOptions<'cb> { - panic!("unimplemented"); - } - fn raw(&self) -> raw::git_push_options { - raw::git_push_options { - version: 1, - callbacks: self.callbacks.as_ref().map(|m| m.raw()) - .unwrap_or(RemoteCallbacks::new().raw()), - proxy_opts: self.proxy.as_ref().map(|m| m.raw()) - .unwrap_or_else(|| ProxyOptions::new().raw()), - pb_parallelism: self.pb_parallelism as libc::c_uint, - // TODO: expose this as a builder option - custom_headers: raw::git_strarray { - count: 0, - strings: 0 as *mut _, - }, - } - } -} - -#[cfg(test)] -mod tests { - use std::cell::Cell; - use tempdir::TempDir; - use {Repository, Remote, RemoteCallbacks, Direction, FetchOptions}; - use {AutotagOption}; - - #[test] - fn smoke() { - let (td, repo) = ::test::repo_init(); - t!(repo.remote("origin", "/path/to/nowhere")); - drop(repo); - - let repo = t!(Repository::init(td.path())); - let origin = t!(repo.find_remote("origin")); - assert_eq!(origin.name(), Some("origin")); - assert_eq!(origin.url(), Some("/path/to/nowhere")); - assert_eq!(origin.pushurl(), None); - - t!(repo.remote_set_url("origin", "/path/to/elsewhere")); - t!(repo.remote_set_pushurl("origin", Some("/path/to/elsewhere"))); - - let stats = origin.stats(); - assert_eq!(stats.total_objects(), 0); - } - - #[test] - fn create_remote() { - let td = TempDir::new("test").unwrap(); - let remote = td.path().join("remote"); - Repository::init_bare(&remote).unwrap(); - - let (_td, repo) = ::test::repo_init(); - let url = if cfg!(unix) { - format!("file://{}", remote.display()) - } else { - format!("file:///{}", remote.display().to_string() - .replace("\\", "/")) - 
}; - - let mut origin = repo.remote("origin", &url).unwrap(); - assert_eq!(origin.name(), Some("origin")); - assert_eq!(origin.url(), Some(&url[..])); - assert_eq!(origin.pushurl(), None); - - { - let mut specs = origin.refspecs(); - let spec = specs.next().unwrap(); - assert!(specs.next().is_none()); - assert_eq!(spec.str(), Some("+refs/heads/*:refs/remotes/origin/*")); - assert_eq!(spec.dst(), Some("refs/remotes/origin/*")); - assert_eq!(spec.src(), Some("refs/heads/*")); - assert!(spec.is_force()); - } - assert!(origin.refspecs().next_back().is_some()); - { - let remotes = repo.remotes().unwrap(); - assert_eq!(remotes.len(), 1); - assert_eq!(remotes.get(0), Some("origin")); - assert_eq!(remotes.iter().count(), 1); - assert_eq!(remotes.iter().next().unwrap(), Some("origin")); - } - - origin.connect(Direction::Push).unwrap(); - assert!(origin.connected()); - origin.disconnect(); - - origin.connect(Direction::Fetch).unwrap(); - assert!(origin.connected()); - origin.download(&[], None).unwrap(); - origin.disconnect(); - - origin.fetch(&[], None, None).unwrap(); - origin.fetch(&[], None, Some("foo")).unwrap(); - origin.update_tips(None, true, AutotagOption::Unspecified, None).unwrap(); - origin.update_tips(None, true, AutotagOption::All, Some("foo")).unwrap(); - - t!(repo.remote_add_fetch("origin", "foo")); - t!(repo.remote_add_fetch("origin", "bar")); - } - - #[test] - fn rename_remote() { - let (_td, repo) = ::test::repo_init(); - repo.remote("origin", "foo").unwrap(); - repo.remote_rename("origin", "foo").unwrap(); - repo.remote_delete("foo").unwrap(); - } - - #[test] - fn create_remote_anonymous() { - let td = TempDir::new("test").unwrap(); - let repo = Repository::init(td.path()).unwrap(); - - let origin = repo.remote_anonymous("/path/to/nowhere").unwrap(); - assert_eq!(origin.name(), None); - drop(origin.clone()); - } - - #[test] - fn is_valid() { - assert!(Remote::is_valid_name("foobar")); - assert!(!Remote::is_valid_name("\x01")); - } - - #[test] - fn 
transfer_cb() { - let (td, _repo) = ::test::repo_init(); - let td2 = TempDir::new("git").unwrap(); - let url = ::test::path2url(&td.path()); - - let repo = Repository::init(td2.path()).unwrap(); - let progress_hit = Cell::new(false); - { - let mut callbacks = RemoteCallbacks::new(); - let mut origin = repo.remote("origin", &url).unwrap(); - - callbacks.transfer_progress(|_progress| { - progress_hit.set(true); - true - }); - origin.fetch(&[], - Some(FetchOptions::new().remote_callbacks(callbacks)), - None).unwrap(); - - let list = t!(origin.list()); - assert_eq!(list.len(), 2); - assert_eq!(list[0].name(), "HEAD"); - assert!(!list[0].is_local()); - assert_eq!(list[1].name(), "refs/heads/master"); - assert!(!list[1].is_local()); - } - assert!(progress_hit.get()); - } - - #[test] - fn push() { - let (_td, repo) = ::test::repo_init(); - let td2 = TempDir::new("git1").unwrap(); - let td3 = TempDir::new("git2").unwrap(); - let url = ::test::path2url(&td2.path()); - - Repository::init_bare(td2.path()).unwrap(); - // git push - let mut remote = repo.remote("origin", &url).unwrap(); - remote.push(&["refs/heads/master"], None).unwrap(); - - let repo = Repository::clone(&url, td3.path()).unwrap(); - let commit = repo.head().unwrap().target().unwrap(); - let commit = repo.find_commit(commit).unwrap(); - assert_eq!(commit.message(), Some("initial")); - } -} diff -Nru cargo-0.17.0/vendor/git2-0.6.3/src/repo.rs cargo-0.19.0/vendor/git2-0.6.3/src/repo.rs --- cargo-0.17.0/vendor/git2-0.6.3/src/repo.rs 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/git2-0.6.3/src/repo.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,2191 +0,0 @@ -use std::env; -use std::ffi::{CStr, CString, OsStr}; -use std::iter::IntoIterator; -use std::mem; -use std::path::Path; -use std::str; -use libc::{c_int, c_char, size_t, c_void, c_uint}; - -use {raw, Revspec, Error, init, Object, RepositoryOpenFlags, RepositoryState, Remote, Buf}; -use {ResetType, Signature, Reference, References, Submodule, Blame, 
BlameOptions}; -use {Branches, BranchType, Index, Config, Oid, Blob, Branch, Commit, Tree}; -use {AnnotatedCommit, MergeOptions, SubmoduleIgnore, SubmoduleStatus}; -use {ObjectType, Tag, Note, Notes, StatusOptions, Statuses, Status, Revwalk}; -use {RevparseMode, RepositoryInitMode, Reflog, IntoCString, Describe}; -use {DescribeOptions, TreeBuilder, Diff, DiffOptions, PackBuilder}; -use build::{RepoBuilder, CheckoutBuilder}; -use string_array::StringArray; -use oid_array::OidArray; -use util::{self, Binding}; - -/// An owned git repository, representing all state associated with the -/// underlying filesystem. -/// -/// This structure corresponds to a `git_repository` in libgit2. Many other -/// types in git2-rs are derivative from this structure and are attached to its -/// lifetime. -/// -/// When a repository goes out of scope it is freed in memory but not deleted -/// from the filesystem. -pub struct Repository { - raw: *mut raw::git_repository, -} - -// It is the current belief that a `Repository` can be sent among threads, or -// even shared among threads in a mutex. -unsafe impl Send for Repository {} - -/// Options which can be used to configure how a repository is initialized -pub struct RepositoryInitOptions { - flags: u32, - mode: u32, - workdir_path: Option, - description: Option, - template_path: Option, - initial_head: Option, - origin_url: Option, -} - -impl Repository { - /// Attempt to open an already-existing repository at `path`. - /// - /// The path can point to either a normal or bare repository. - pub fn open>(path: P) -> Result { - init(); - let path = try!(path.as_ref().into_c_string()); - let mut ret = 0 as *mut raw::git_repository; - unsafe { - try_call!(raw::git_repository_open(&mut ret, path)); - Ok(Binding::from_raw(ret)) - } - } - - /// Find and open an existing repository, respecting git environment - /// variables. This acts like `open_ext` with the - /// `REPOSITORY_OPEN_FROM_ENV` flag, but additionally respects `$GIT_DIR`. 
- /// With `$GIT_DIR` unset, this will search for a repository starting in - /// the current directory. - pub fn open_from_env() -> Result { - init(); - let mut ret = 0 as *mut raw::git_repository; - let flags = raw::GIT_REPOSITORY_OPEN_FROM_ENV; - unsafe { - try_call!(raw::git_repository_open_ext(&mut ret, - 0 as *const _, - flags as c_uint, - 0 as *const _)); - Ok(Binding::from_raw(ret)) - } - } - - /// Find and open an existing repository, with additional options. - /// - /// If flags contains REPOSITORY_OPEN_NO_SEARCH, the path must point - /// directly to a repository; otherwise, this may point to a subdirectory - /// of a repository, and `open_ext` will search up through parent - /// directories. - /// - /// If flags contains REPOSITORY_OPEN_CROSS_FS, the search through parent - /// directories will not cross a filesystem boundary (detected when the - /// stat st_dev field changes). - /// - /// If flags contains REPOSITORY_OPEN_BARE, force opening the repository as - /// bare even if it isn't, ignoring any working directory, and defer - /// loading the repository configuration for performance. - /// - /// If flags contains REPOSITORY_OPEN_NO_DOTGIT, don't try appending - /// `/.git` to `path`. - /// - /// If flags contains REPOSITORY_OPEN_FROM_ENV, `open_ext` will ignore - /// other flags and `ceiling_dirs`, and respect the same environment - /// variables git does. Note, however, that `path` overrides `$GIT_DIR`; to - /// respect `$GIT_DIR` as well, use `open_from_env`. - /// - /// ceiling_dirs specifies a list of paths that the search through parent - /// directories will stop before entering. Use the functions in std::env - /// to construct or manipulate such a path list. 
- pub fn open_ext(path: P, - flags: RepositoryOpenFlags, - ceiling_dirs: I) - -> Result - where P: AsRef, O: AsRef, I: IntoIterator - { - init(); - let path = try!(path.as_ref().into_c_string()); - let ceiling_dirs_os = try!(env::join_paths(ceiling_dirs)); - let ceiling_dirs = try!(ceiling_dirs_os.into_c_string()); - let mut ret = 0 as *mut raw::git_repository; - unsafe { - try_call!(raw::git_repository_open_ext(&mut ret, - path, - flags.bits() as c_uint, - ceiling_dirs)); - Ok(Binding::from_raw(ret)) - } - } - - /// Attempt to open an already-existing repository at or above `path` - /// - /// This starts at `path` and looks up the filesystem hierarchy - /// until it finds a repository. - pub fn discover>(path: P) -> Result { - // TODO: this diverges significantly from the libgit2 API - init(); - let buf = Buf::new(); - let path = try!(path.as_ref().into_c_string()); - unsafe { - try_call!(raw::git_repository_discover(buf.raw(), path, 1, - 0 as *const _)); - } - Repository::open(util::bytes2path(&*buf)) - } - - /// Creates a new repository in the specified folder. - /// - /// This by default will create any necessary directories to create the - /// repository, and it will read any user-specified templates when creating - /// the repository. This behavior can be configured through `init_opts`. - pub fn init>(path: P) -> Result { - Repository::init_opts(path, &RepositoryInitOptions::new()) - } - - /// Creates a new `--bare` repository in the specified folder. - /// - /// The folder must exist prior to invoking this function. - pub fn init_bare>(path: P) -> Result { - Repository::init_opts(path, RepositoryInitOptions::new().bare(true)) - } - - /// Creates a new `--bare` repository in the specified folder. - /// - /// The folder must exist prior to invoking this function. 
- pub fn init_opts>(path: P, opts: &RepositoryInitOptions) - -> Result { - init(); - let path = try!(path.as_ref().into_c_string()); - let mut ret = 0 as *mut raw::git_repository; - unsafe { - let mut opts = opts.raw(); - try_call!(raw::git_repository_init_ext(&mut ret, path, &mut opts)); - Ok(Binding::from_raw(ret)) - } - } - - /// Clone a remote repository. - /// - /// See the `RepoBuilder` struct for more information. This function will - /// delegate to a fresh `RepoBuilder` - pub fn clone>(url: &str, into: P) - -> Result { - ::init(); - RepoBuilder::new().clone(url, into.as_ref()) - } - - /// Execute a rev-parse operation against the `spec` listed. - /// - /// The resulting revision specification is returned, or an error is - /// returned if one occurs. - pub fn revparse(&self, spec: &str) -> Result { - let mut raw = raw::git_revspec { - from: 0 as *mut _, - to: 0 as *mut _, - flags: 0, - }; - let spec = try!(CString::new(spec)); - unsafe { - try_call!(raw::git_revparse(&mut raw, self.raw, spec)); - let to = Binding::from_raw_opt(raw.to); - let from = Binding::from_raw_opt(raw.from); - let mode = RevparseMode::from_bits_truncate(raw.flags as u32); - Ok(Revspec::from_objects(from, to, mode)) - } - } - - /// Find a single object, as specified by a revision string. - pub fn revparse_single(&self, spec: &str) -> Result { - let spec = try!(CString::new(spec)); - let mut obj = 0 as *mut raw::git_object; - unsafe { - try_call!(raw::git_revparse_single(&mut obj, self.raw, spec)); - assert!(!obj.is_null()); - Ok(Binding::from_raw(obj)) - } - } - - /// Find a single object and intermediate reference by a revision string. - /// - /// See `man gitrevisions`, or - /// http://git-scm.com/docs/git-rev-parse.html#_specifying_revisions for - /// information on the syntax accepted. - /// - /// In some cases (`@{<-n>}` or `@{upstream}`), the expression - /// may point to an intermediate reference. 
When such expressions are being - /// passed in, this intermediate reference is returned. - pub fn revparse_ext(&self, spec: &str) - -> Result<(Object, Option), Error> { - let spec = try!(CString::new(spec)); - let mut git_obj = 0 as *mut raw::git_object; - let mut git_ref = 0 as *mut raw::git_reference; - unsafe { - try_call!(raw::git_revparse_ext(&mut git_obj, &mut git_ref, - self.raw, spec)); - assert!(!git_obj.is_null()); - Ok((Binding::from_raw(git_obj), Binding::from_raw_opt(git_ref))) - } - } - - /// Tests whether this repository is a bare repository or not. - pub fn is_bare(&self) -> bool { - unsafe { raw::git_repository_is_bare(self.raw) == 1 } - } - - /// Tests whether this repository is a shallow clone. - pub fn is_shallow(&self) -> bool { - unsafe { raw::git_repository_is_shallow(self.raw) == 1 } - } - - /// Tests whether this repository is empty. - pub fn is_empty(&self) -> Result { - let empty = unsafe { - try_call!(raw::git_repository_is_empty(self.raw)) - }; - Ok(empty == 1) - } - - /// Returns the path to the `.git` folder for normal repositories or the - /// repository itself for bare repositories. - pub fn path(&self) -> &Path { - unsafe { - let ptr = raw::git_repository_path(self.raw); - util::bytes2path(::opt_bytes(self, ptr).unwrap()) - } - } - - /// Returns the current state of this repository - pub fn state(&self) -> RepositoryState { - let state = unsafe { raw::git_repository_state(self.raw) }; - macro_rules! 
check( ($($raw:ident => $real:ident),*) => ( - $(if state == raw::$raw as c_int { - super::RepositoryState::$real - }) else * - else { - panic!("unknown repository state: {}", state) - } - ) ); - - check!( - GIT_REPOSITORY_STATE_NONE => Clean, - GIT_REPOSITORY_STATE_MERGE => Merge, - GIT_REPOSITORY_STATE_REVERT => Revert, - GIT_REPOSITORY_STATE_REVERT_SEQUENCE => RevertSequence, - GIT_REPOSITORY_STATE_CHERRYPICK => CherryPick, - GIT_REPOSITORY_STATE_CHERRYPICK_SEQUENCE => CherryPickSequence, - GIT_REPOSITORY_STATE_BISECT => Bisect, - GIT_REPOSITORY_STATE_REBASE => Rebase, - GIT_REPOSITORY_STATE_REBASE_INTERACTIVE => RebaseInteractive, - GIT_REPOSITORY_STATE_REBASE_MERGE => RebaseMerge, - GIT_REPOSITORY_STATE_APPLY_MAILBOX => ApplyMailbox, - GIT_REPOSITORY_STATE_APPLY_MAILBOX_OR_REBASE => ApplyMailboxOrRebase - ) - } - - /// Get the path of the working directory for this repository. - /// - /// If this repository is bare, then `None` is returned. - pub fn workdir(&self) -> Option<&Path> { - unsafe { - let ptr = raw::git_repository_workdir(self.raw); - if ptr.is_null() { - None - } else { - Some(util::bytes2path(CStr::from_ptr(ptr).to_bytes())) - } - } - } - - /// Set the path to the working directory for this repository. - /// - /// If `update_link` is true, create/update the gitlink file in the workdir - /// and set config "core.worktree" (if workdir is not the parent of the .git - /// directory). - pub fn set_workdir(&self, path: &Path, update_gitlink: bool) - -> Result<(), Error> { - let path = try!(path.into_c_string()); - unsafe { - try_call!(raw::git_repository_set_workdir(self.raw(), path, - update_gitlink)); - } - Ok(()) - } - - /// Get the currently active namespace for this repository. - /// - /// If there is no namespace, or the namespace is not a valid utf8 string, - /// `None` is returned. 
- pub fn namespace(&self) -> Option<&str> { - self.namespace_bytes().and_then(|s| str::from_utf8(s).ok()) - } - - /// Get the currently active namespace for this repository as a byte array. - /// - /// If there is no namespace, `None` is returned. - pub fn namespace_bytes(&self) -> Option<&[u8]> { - unsafe { ::opt_bytes(self, raw::git_repository_get_namespace(self.raw)) } - } - - /// List all remotes for a given repository - pub fn remotes(&self) -> Result { - let mut arr = raw::git_strarray { - strings: 0 as *mut *mut c_char, - count: 0, - }; - unsafe { - try_call!(raw::git_remote_list(&mut arr, self.raw)); - Ok(Binding::from_raw(arr)) - } - } - - /// Get the information for a particular remote - pub fn find_remote(&self, name: &str) -> Result { - let mut ret = 0 as *mut raw::git_remote; - let name = try!(CString::new(name)); - unsafe { - try_call!(raw::git_remote_lookup(&mut ret, self.raw, name)); - Ok(Binding::from_raw(ret)) - } - } - - /// Add a remote with the default fetch refspec to the repository's - /// configuration. - pub fn remote(&self, name: &str, url: &str) -> Result { - let mut ret = 0 as *mut raw::git_remote; - let name = try!(CString::new(name)); - let url = try!(CString::new(url)); - unsafe { - try_call!(raw::git_remote_create(&mut ret, self.raw, name, url)); - Ok(Binding::from_raw(ret)) - } - } - - /// Create an anonymous remote - /// - /// Create a remote with the given url and refspec in memory. You can use - /// this when you have a URL instead of a remote's name. Note that anonymous - /// remotes cannot be converted to persisted remotes. - pub fn remote_anonymous(&self, url: &str) -> Result { - let mut ret = 0 as *mut raw::git_remote; - let url = try!(CString::new(url)); - unsafe { - try_call!(raw::git_remote_create_anonymous(&mut ret, self.raw, url)); - Ok(Binding::from_raw(ret)) - } - } - - /// Give a remote a new name - /// - /// All remote-tracking branches and configuration settings for the remote - /// are updated. 
- /// - /// A temporary in-memory remote cannot be given a name with this method. - /// - /// No loaded instances of the remote with the old name will change their - /// name or their list of refspecs. - /// - /// The returned array of strings is a list of the non-default refspecs - /// which cannot be renamed and are returned for further processing by the - /// caller. - pub fn remote_rename(&self, name: &str, - new_name: &str) -> Result { - let name = try!(CString::new(name)); - let new_name = try!(CString::new(new_name)); - let mut problems = raw::git_strarray { - count: 0, - strings: 0 as *mut *mut c_char, - }; - unsafe { - try_call!(raw::git_remote_rename(&mut problems, self.raw, name, - new_name)); - Ok(Binding::from_raw(problems)) - } - } - - /// Delete an existing persisted remote. - /// - /// All remote-tracking branches and configuration settings for the remote - /// will be removed. - pub fn remote_delete(&self, name: &str) -> Result<(), Error> { - let name = try!(CString::new(name)); - unsafe { try_call!(raw::git_remote_delete(self.raw, name)); } - Ok(()) - } - - /// Add a fetch refspec to the remote's configuration - /// - /// Add the given refspec to the fetch list in the configuration. No loaded - /// remote instances will be affected. - pub fn remote_add_fetch(&self, name: &str, spec: &str) - -> Result<(), Error> { - let name = try!(CString::new(name)); - let spec = try!(CString::new(spec)); - unsafe { - try_call!(raw::git_remote_add_fetch(self.raw, name, spec)); - } - Ok(()) - } - - /// Add a push refspec to the remote's configuration. - /// - /// Add the given refspec to the push list in the configuration. No - /// loaded remote instances will be affected. 
- pub fn remote_add_push(&self, name: &str, spec: &str) - -> Result<(), Error> { - let name = try!(CString::new(name)); - let spec = try!(CString::new(spec)); - unsafe { - try_call!(raw::git_remote_add_push(self.raw, name, spec)); - } - Ok(()) - } - - /// Set the remote's url in the configuration - /// - /// Remote objects already in memory will not be affected. This assumes - /// the common case of a single-url remote and will otherwise return an - /// error. - pub fn remote_set_url(&self, name: &str, url: &str) -> Result<(), Error> { - let name = try!(CString::new(name)); - let url = try!(CString::new(url)); - unsafe { try_call!(raw::git_remote_set_url(self.raw, name, url)); } - Ok(()) - } - - /// Set the remote's url for pushing in the configuration. - /// - /// Remote objects already in memory will not be affected. This assumes - /// the common case of a single-url remote and will otherwise return an - /// error. - /// - /// `None` indicates that it should be cleared. - pub fn remote_set_pushurl(&self, name: &str, pushurl: Option<&str>) - -> Result<(), Error> { - let name = try!(CString::new(name)); - let pushurl = try!(::opt_cstr(pushurl)); - unsafe { - try_call!(raw::git_remote_set_pushurl(self.raw, name, pushurl)); - } - Ok(()) - } - - /// Sets the current head to the specified object and optionally resets - /// the index and working tree to match. - /// - /// A soft reset means the head will be moved to the commit. - /// - /// A mixed reset will trigger a soft reset, plus the index will be - /// replaced with the content of the commit tree. - /// - /// A hard reset will trigger a mixed reset and the working directory will - /// be replaced with the content of the index. (Untracked and ignored files - /// will be left alone, however.) - /// - /// The `target` is a commit-ish to which the head should be moved to. The - /// object can either be a commit or a tag, but tags must be dereferenceable - /// to a commit. 
- /// - /// The `checkout` options will only be used for a hard reset. - pub fn reset(&self, - target: &Object, - kind: ResetType, - checkout: Option<&mut CheckoutBuilder>) - -> Result<(), Error> { - unsafe { - let mut opts: raw::git_checkout_options = mem::zeroed(); - try_call!(raw::git_checkout_init_options(&mut opts, - raw::GIT_CHECKOUT_OPTIONS_VERSION)); - let opts = checkout.map(|c| { - c.configure(&mut opts); &mut opts - }); - try_call!(raw::git_reset(self.raw, target.raw(), kind, opts)); - } - Ok(()) - } - - /// Updates some entries in the index from the target commit tree. - /// - /// The scope of the updated entries is determined by the paths being - /// in the iterator provided. - /// - /// Passing a `None` target will result in removing entries in the index - /// matching the provided pathspecs. - pub fn reset_default(&self, - target: Option<&Object>, - paths: I) -> Result<(), Error> - where T: IntoCString, I: IntoIterator, - { - let (_a, _b, mut arr) = try!(::util::iter2cstrs(paths)); - let target = target.map(|t| t.raw()); - unsafe { - try_call!(raw::git_reset_default(self.raw, target, &mut arr)); - } - Ok(()) - } - - /// Retrieve and resolve the reference pointed at by HEAD. - pub fn head(&self) -> Result { - let mut ret = 0 as *mut raw::git_reference; - unsafe { - try_call!(raw::git_repository_head(&mut ret, self.raw)); - Ok(Binding::from_raw(ret)) - } - } - - /// Make the repository HEAD point to the specified reference. - /// - /// If the provided reference points to a tree or a blob, the HEAD is - /// unaltered and an error is returned. - /// - /// If the provided reference points to a branch, the HEAD will point to - /// that branch, staying attached, or become attached if it isn't yet. If - /// the branch doesn't exist yet, no error will be returned. The HEAD will - /// then be attached to an unborn branch. - /// - /// Otherwise, the HEAD will be detached and will directly point to the - /// commit. 
- pub fn set_head(&self, refname: &str) -> Result<(), Error> { - let refname = try!(CString::new(refname)); - unsafe { - try_call!(raw::git_repository_set_head(self.raw, refname)); - } - Ok(()) - } - - /// Make the repository HEAD directly point to the commit. - /// - /// If the provided committish cannot be found in the repository, the HEAD - /// is unaltered and an error is returned. - /// - /// If the provided commitish cannot be peeled into a commit, the HEAD is - /// unaltered and an error is returned. - /// - /// Otherwise, the HEAD will eventually be detached and will directly point - /// to the peeled commit. - pub fn set_head_detached(&self, commitish: Oid) -> Result<(), Error> { - unsafe { - try_call!(raw::git_repository_set_head_detached(self.raw, - commitish.raw())); - } - Ok(()) - } - - /// Create an iterator for the repo's references - pub fn references(&self) -> Result { - let mut ret = 0 as *mut raw::git_reference_iterator; - unsafe { - try_call!(raw::git_reference_iterator_new(&mut ret, self.raw)); - Ok(Binding::from_raw(ret)) - } - } - - /// Create an iterator for the repo's references that match the specified - /// glob - pub fn references_glob(&self, glob: &str) -> Result { - let mut ret = 0 as *mut raw::git_reference_iterator; - let glob = try!(CString::new(glob)); - unsafe { - try_call!(raw::git_reference_iterator_glob_new(&mut ret, self.raw, - glob)); - - Ok(Binding::from_raw(ret)) - } - } - - /// Load all submodules for this repository and return them. 
- pub fn submodules(&self) -> Result, Error> { - struct Data<'a, 'b:'a> { - repo: &'b Repository, - ret: &'a mut Vec>, - } - let mut ret = Vec::new(); - - unsafe { - let mut data = Data { - repo: self, - ret: &mut ret, - }; - try_call!(raw::git_submodule_foreach(self.raw, append, - &mut data as *mut _ - as *mut c_void)); - } - - return Ok(ret); - - extern fn append(_repo: *mut raw::git_submodule, - name: *const c_char, - data: *mut c_void) -> c_int { - unsafe { - let data = &mut *(data as *mut Data); - let mut raw = 0 as *mut raw::git_submodule; - let rc = raw::git_submodule_lookup(&mut raw, data.repo.raw(), - name); - assert_eq!(rc, 0); - data.ret.push(Binding::from_raw(raw)); - } - 0 - } - } - - /// Gather file status information and populate the returned structure. - /// - /// Note that if a pathspec is given in the options to filter the - /// status, then the results from rename detection (if you enable it) may - /// not be accurate. To do rename detection properly, this must be called - /// with no pathspec so that all files can be considered. - pub fn statuses(&self, options: Option<&mut StatusOptions>) - -> Result { - let mut ret = 0 as *mut raw::git_status_list; - unsafe { - try_call!(raw::git_status_list_new(&mut ret, self.raw, - options.map(|s| s.raw()) - .unwrap_or(0 as *const _))); - Ok(Binding::from_raw(ret)) - } - } - - /// Test if the ignore rules apply to a given file. - /// - /// This function checks the ignore rules to see if they would apply to the - /// given file. This indicates if the file would be ignored regardless of - /// whether the file is already in the index or committed to the repository. - /// - /// One way to think of this is if you were to do "git add ." on the - /// directory containing the file, would it be added or not? 
- pub fn status_should_ignore(&self, path: &Path) -> Result { - let mut ret = 0 as c_int; - let path = try!(path.into_c_string()); - unsafe { - try_call!(raw::git_status_should_ignore(&mut ret, self.raw, - path)); - } - Ok(ret != 0) - } - - /// Get file status for a single file. - /// - /// This tries to get status for the filename that you give. If no files - /// match that name (in either the HEAD, index, or working directory), this - /// returns NotFound. - /// - /// If the name matches multiple files (for example, if the path names a - /// directory or if running on a case- insensitive filesystem and yet the - /// HEAD has two entries that both match the path), then this returns - /// Ambiguous because it cannot give correct results. - /// - /// This does not do any sort of rename detection. Renames require a set of - /// targets and because of the path filtering, there is not enough - /// information to check renames correctly. To check file status with rename - /// detection, there is no choice but to do a full `statuses` and scan - /// through looking for the path that you are interested in. - pub fn status_file(&self, path: &Path) -> Result { - let mut ret = 0 as c_uint; - let path = try!(path.into_c_string()); - unsafe { - try_call!(raw::git_status_file(&mut ret, self.raw, - path)); - } - Ok(Status::from_bits_truncate(ret as u32)) - } - - /// Create an iterator which loops over the requested branches. - pub fn branches(&self, filter: Option) - -> Result { - let mut raw = 0 as *mut raw::git_branch_iterator; - unsafe { - try_call!(raw::git_branch_iterator_new(&mut raw, self.raw(), filter)); - Ok(Branches::from_raw(raw)) - } - } - - /// Get the Index file for this repository. - /// - /// If a custom index has not been set, the default index for the repository - /// will be returned (the one located in .git/index). 
- pub fn index(&self) -> Result { - let mut raw = 0 as *mut raw::git_index; - unsafe { - try_call!(raw::git_repository_index(&mut raw, self.raw())); - Ok(Binding::from_raw(raw)) - } - } - - /// Set the Index file for this repository. - pub fn set_index(&self, index: &mut Index) { - unsafe { - raw::git_repository_set_index(self.raw(), index.raw()); - } - } - - /// Get the configuration file for this repository. - /// - /// If a configuration file has not been set, the default config set for the - /// repository will be returned, including global and system configurations - /// (if they are available). - pub fn config(&self) -> Result { - let mut raw = 0 as *mut raw::git_config; - unsafe { - try_call!(raw::git_repository_config(&mut raw, self.raw())); - Ok(Binding::from_raw(raw)) - } - } - - /// Write an in-memory buffer to the ODB as a blob. - /// - /// The Oid returned can in turn be passed to `find_blob` to get a handle to - /// the blob. - pub fn blob(&self, data: &[u8]) -> Result { - let mut raw = raw::git_oid { id: [0; raw::GIT_OID_RAWSZ] }; - unsafe { - let ptr = data.as_ptr() as *const c_void; - let len = data.len() as size_t; - try_call!(raw::git_blob_create_frombuffer(&mut raw, self.raw(), - ptr, len)); - Ok(Binding::from_raw(&raw as *const _)) - } - } - - /// Read a file from the filesystem and write its content to the Object - /// Database as a loose blob - /// - /// The Oid returned can in turn be passed to `find_blob` to get a handle to - /// the blob. - pub fn blob_path(&self, path: &Path) -> Result { - let path = try!(path.into_c_string()); - let mut raw = raw::git_oid { id: [0; raw::GIT_OID_RAWSZ] }; - unsafe { - try_call!(raw::git_blob_create_fromdisk(&mut raw, self.raw(), - path)); - Ok(Binding::from_raw(&raw as *const _)) - } - } - - /// Lookup a reference to one of the objects in a repository. 
- pub fn find_blob(&self, oid: Oid) -> Result { - let mut raw = 0 as *mut raw::git_blob; - unsafe { - try_call!(raw::git_blob_lookup(&mut raw, self.raw(), oid.raw())); - Ok(Binding::from_raw(raw)) - } - } - - /// Create a new branch pointing at a target commit - /// - /// A new direct reference will be created pointing to this target commit. - /// If `force` is true and a reference already exists with the given name, - /// it'll be replaced. - pub fn branch(&self, - branch_name: &str, - target: &Commit, - force: bool) -> Result { - let branch_name = try!(CString::new(branch_name)); - let mut raw = 0 as *mut raw::git_reference; - unsafe { - try_call!(raw::git_branch_create(&mut raw, - self.raw(), - branch_name, - target.raw(), - force)); - Ok(Branch::wrap(Binding::from_raw(raw))) - } - } - - /// Lookup a branch by its name in a repository. - pub fn find_branch(&self, name: &str, branch_type: BranchType) - -> Result { - let name = try!(CString::new(name)); - let mut ret = 0 as *mut raw::git_reference; - unsafe { - try_call!(raw::git_branch_lookup(&mut ret, self.raw(), name, - branch_type)); - Ok(Branch::wrap(Binding::from_raw(ret))) - } - } - - /// Create new commit in the repository - /// - /// If the `update_ref` is not `None`, name of the reference that will be - /// updated to point to this commit. If the reference is not direct, it will - /// be resolved to a direct reference. Use "HEAD" to update the HEAD of the - /// current branch and make it point to this commit. If the reference - /// doesn't exist yet, it will be created. If it does exist, the first - /// parent must be the tip of this branch. 
- pub fn commit(&self, - update_ref: Option<&str>, - author: &Signature, - committer: &Signature, - message: &str, - tree: &Tree, - parents: &[&Commit]) -> Result { - let update_ref = try!(::opt_cstr(update_ref)); - let mut parent_ptrs = parents.iter().map(|p| { - p.raw() as *const raw::git_commit - }).collect::>(); - let message = try!(CString::new(message)); - let mut raw = raw::git_oid { id: [0; raw::GIT_OID_RAWSZ] }; - unsafe { - try_call!(raw::git_commit_create(&mut raw, - self.raw(), - update_ref, - author.raw(), - committer.raw(), - 0 as *const c_char, - message, - tree.raw(), - parents.len() as size_t, - parent_ptrs.as_mut_ptr())); - Ok(Binding::from_raw(&raw as *const _)) - } - } - - - /// Lookup a reference to one of the commits in a repository. - pub fn find_commit(&self, oid: Oid) -> Result { - let mut raw = 0 as *mut raw::git_commit; - unsafe { - try_call!(raw::git_commit_lookup(&mut raw, self.raw(), oid.raw())); - Ok(Binding::from_raw(raw)) - } - } - - /// Lookup a reference to one of the objects in a repository. - pub fn find_object(&self, oid: Oid, - kind: Option) -> Result { - let mut raw = 0 as *mut raw::git_object; - unsafe { - try_call!(raw::git_object_lookup(&mut raw, self.raw(), oid.raw(), - kind)); - Ok(Binding::from_raw(raw)) - } - } - - /// Create a new direct reference. - /// - /// This function will return an error if a reference already exists with - /// the given name unless force is true, in which case it will be - /// overwritten. - pub fn reference(&self, name: &str, id: Oid, force: bool, - log_message: &str) -> Result { - let name = try!(CString::new(name)); - let log_message = try!(CString::new(log_message)); - let mut raw = 0 as *mut raw::git_reference; - unsafe { - try_call!(raw::git_reference_create(&mut raw, self.raw(), name, - id.raw(), force, - log_message)); - Ok(Binding::from_raw(raw)) - } - } - - /// Conditionally create new direct reference. 
- /// - /// A direct reference (also called an object id reference) refers directly - /// to a specific object id (a.k.a. OID or SHA) in the repository. The id - /// permanently refers to the object (although the reference itself can be - /// moved). For example, in libgit2 the direct ref "refs/tags/v0.17.0" - /// refers to OID 5b9fac39d8a76b9139667c26a63e6b3f204b3977. - /// - /// The direct reference will be created in the repository and written to - /// the disk. - /// - /// Valid reference names must follow one of two patterns: - /// - /// 1. Top-level names must contain only capital letters and underscores, - /// and must begin and end with a letter. (e.g. "HEAD", "ORIG_HEAD"). - /// 2. Names prefixed with "refs/" can be almost anything. You must avoid - /// the characters `~`, `^`, `:`, `\\`, `?`, `[`, and `*`, and the - /// sequences ".." and "@{" which have special meaning to revparse. - /// - /// This function will return an error if a reference already exists with - /// the given name unless `force` is true, in which case it will be - /// overwritten. - /// - /// The message for the reflog will be ignored if the reference does not - /// belong in the standard set (HEAD, branches and remote-tracking - /// branches) and it does not have a reflog. - /// - /// It will return GIT_EMODIFIED if the reference's value at the time of - /// updating does not match the one passed through `current_id` (i.e. if the - /// ref has changed since the user read it). - pub fn reference_matching(&self, - name: &str, - id: Oid, - force: bool, - current_id: Oid, - log_message: &str) -> Result { - let name = try!(CString::new(name)); - let log_message = try!(CString::new(log_message)); - let mut raw = 0 as *mut raw::git_reference; - unsafe { - try_call!(raw::git_reference_create_matching(&mut raw, - self.raw(), - name, - id.raw(), - force, - current_id.raw(), - log_message)); - Ok(Binding::from_raw(raw)) - } - } - - /// Create a new symbolic reference. 
- /// - /// This function will return an error if a reference already exists with - /// the given name unless force is true, in which case it will be - /// overwritten. - pub fn reference_symbolic(&self, name: &str, target: &str, - force: bool, - log_message: &str) - -> Result { - let name = try!(CString::new(name)); - let target = try!(CString::new(target)); - let log_message = try!(CString::new(log_message)); - let mut raw = 0 as *mut raw::git_reference; - unsafe { - try_call!(raw::git_reference_symbolic_create(&mut raw, self.raw(), - name, target, force, - log_message)); - Ok(Binding::from_raw(raw)) - } - } - - /// Create a new symbolic reference. - /// - /// This function will return an error if a reference already exists with - /// the given name unless force is true, in which case it will be - /// overwritten. - /// - /// It will return GIT_EMODIFIED if the reference's value at the time of - /// updating does not match the one passed through current_value (i.e. if - /// the ref has changed since the user read it). - pub fn reference_symbolic_matching(&self, - name: &str, - target: &str, - force: bool, - current_value: &str, - log_message: &str) - -> Result { - let name = try!(CString::new(name)); - let target = try!(CString::new(target)); - let current_value = try!(CString::new(current_value)); - let log_message = try!(CString::new(log_message)); - let mut raw = 0 as *mut raw::git_reference; - unsafe { - try_call!(raw::git_reference_symbolic_create_matching(&mut raw, - self.raw(), - name, - target, - force, - current_value, - log_message)); - Ok(Binding::from_raw(raw)) - } - } - - /// Lookup a reference to one of the objects in a repository. - pub fn find_reference(&self, name: &str) -> Result { - let name = try!(CString::new(name)); - let mut raw = 0 as *mut raw::git_reference; - unsafe { - try_call!(raw::git_reference_lookup(&mut raw, self.raw(), name)); - Ok(Binding::from_raw(raw)) - } - } - - /// Lookup a reference by name and resolve immediately to OID. 
- /// - /// This function provides a quick way to resolve a reference name straight - /// through to the object id that it refers to. This avoids having to - /// allocate or free any `Reference` objects for simple situations. - pub fn refname_to_id(&self, name: &str) -> Result { - let name = try!(CString::new(name)); - let mut ret = raw::git_oid { id: [0; raw::GIT_OID_RAWSZ] }; - unsafe { - try_call!(raw::git_reference_name_to_id(&mut ret, self.raw(), name)); - Ok(Binding::from_raw(&ret as *const _)) - } - } - - /// Creates a git_annotated_commit from the given reference. - pub fn reference_to_annotated_commit(&self, reference: &Reference) - -> Result { - let mut ret = 0 as *mut raw::git_annotated_commit; - unsafe { - try_call!(raw::git_annotated_commit_from_ref(&mut ret, - self.raw(), - reference.raw())); - Ok(AnnotatedCommit::from_raw(ret)) - } - } - - /// Create a new action signature with default user and now timestamp. - /// - /// This looks up the user.name and user.email from the configuration and - /// uses the current time as the timestamp, and creates a new signature - /// based on that information. It will return `NotFound` if either the - /// user.name or user.email are not set. - pub fn signature(&self) -> Result, Error> { - let mut ret = 0 as *mut raw::git_signature; - unsafe { - try_call!(raw::git_signature_default(&mut ret, self.raw())); - Ok(Binding::from_raw(ret)) - } - } - - /// Set up a new git submodule for checkout. - /// - /// This does "git submodule add" up to the fetch and checkout of the - /// submodule contents. It preps a new submodule, creates an entry in - /// `.gitmodules` and creates an empty initialized repository either at the - /// given path in the working directory or in `.git/modules` with a gitlink - /// from the working directory to the new repo. - /// - /// To fully emulate "git submodule add" call this function, then `open()` - /// the submodule repo and perform the clone step as needed. 
Lastly, call - /// `finalize()` to wrap up adding the new submodule and `.gitmodules` to - /// the index to be ready to commit. - pub fn submodule(&self, url: &str, path: &Path, - use_gitlink: bool) -> Result { - let url = try!(CString::new(url)); - let path = try!(path.into_c_string()); - let mut raw = 0 as *mut raw::git_submodule; - unsafe { - try_call!(raw::git_submodule_add_setup(&mut raw, self.raw(), - url, path, use_gitlink)); - Ok(Binding::from_raw(raw)) - } - } - - /// Lookup submodule information by name or path. - /// - /// Given either the submodule name or path (they are usually the same), - /// this returns a structure describing the submodule. - pub fn find_submodule(&self, name: &str) -> Result { - let name = try!(CString::new(name)); - let mut raw = 0 as *mut raw::git_submodule; - unsafe { - try_call!(raw::git_submodule_lookup(&mut raw, self.raw(), name)); - Ok(Binding::from_raw(raw)) - } - } - - /// Get the status for a submodule. - /// - /// This looks at a submodule and tries to determine the status. It - /// will return a combination of the `SubmoduleStatus` values. - pub fn submodule_status(&self, name: &str, ignore: SubmoduleIgnore) - -> Result { - let mut ret = 0; - let name = try!(CString::new(name)); - unsafe { - try_call!(raw::git_submodule_status(&mut ret, self.raw, name, - ignore)); - } - Ok(SubmoduleStatus::from_bits_truncate(ret as u32)) - } - - /// Lookup a reference to one of the objects in a repository. - pub fn find_tree(&self, oid: Oid) -> Result { - let mut raw = 0 as *mut raw::git_tree; - unsafe { - try_call!(raw::git_tree_lookup(&mut raw, self.raw(), oid.raw())); - Ok(Binding::from_raw(raw)) - } - } - - /// Create a new TreeBuilder, optionally initialized with the - /// entries of the given Tree. - /// - /// The tree builder can be used to create or modify trees in memory and - /// write them as tree objects to the database. 
- pub fn treebuilder(&self, tree: Option<&Tree>) -> Result { - unsafe { - let mut ret = 0 as *mut raw::git_treebuilder; - let tree = match tree { - Some(tree) => tree.raw(), - None => 0 as *mut raw::git_tree, - }; - try_call!(raw::git_treebuilder_new(&mut ret, self.raw, tree)); - Ok(Binding::from_raw(ret)) - } - } - - - /// Create a new tag in the repository from an object - /// - /// A new reference will also be created pointing to this tag object. If - /// `force` is true and a reference already exists with the given name, - /// it'll be replaced. - /// - /// The message will not be cleaned up. - /// - /// The tag name will be checked for validity. You must avoid the characters - /// '~', '^', ':', ' \ ', '?', '[', and '*', and the sequences ".." and " @ - /// {" which have special meaning to revparse. - pub fn tag(&self, name: &str, target: &Object, - tagger: &Signature, message: &str, - force: bool) -> Result { - let name = try!(CString::new(name)); - let message = try!(CString::new(message)); - let mut raw = raw::git_oid { id: [0; raw::GIT_OID_RAWSZ] }; - unsafe { - try_call!(raw::git_tag_create(&mut raw, self.raw, name, - target.raw(), tagger.raw(), - message, force)); - Ok(Binding::from_raw(&raw as *const _)) - } - } - - /// Create a new lightweight tag pointing at a target object - /// - /// A new direct reference will be created pointing to this target object. - /// If force is true and a reference already exists with the given name, - /// it'll be replaced. - pub fn tag_lightweight(&self, - name: &str, - target: &Object, - force: bool) -> Result { - let name = try!(CString::new(name)); - let mut raw = raw::git_oid { id: [0; raw::GIT_OID_RAWSZ] }; - unsafe { - try_call!(raw::git_tag_create_lightweight(&mut raw, self.raw, name, - target.raw(), force)); - Ok(Binding::from_raw(&raw as *const _)) - } - } - - /// Lookup a tag object from the repository. 
- pub fn find_tag(&self, id: Oid) -> Result { - let mut raw = 0 as *mut raw::git_tag; - unsafe { - try_call!(raw::git_tag_lookup(&mut raw, self.raw, id.raw())); - Ok(Binding::from_raw(raw)) - } - } - - /// Delete an existing tag reference. - /// - /// The tag name will be checked for validity, see `tag` for some rules - /// about valid names. - pub fn tag_delete(&self, name: &str) -> Result<(), Error> { - let name = try!(CString::new(name)); - unsafe { - try_call!(raw::git_tag_delete(self.raw, name)); - Ok(()) - } - } - - /// Get a list with all the tags in the repository. - /// - /// An optional fnmatch pattern can also be specified. - pub fn tag_names(&self, pattern: Option<&str>) -> Result { - let mut arr = raw::git_strarray { - strings: 0 as *mut *mut c_char, - count: 0, - }; - unsafe { - match pattern { - Some(s) => { - let s = try!(CString::new(s)); - try_call!(raw::git_tag_list_match(&mut arr, s, self.raw)); - } - None => { try_call!(raw::git_tag_list(&mut arr, self.raw)); } - } - Ok(Binding::from_raw(arr)) - } - } - - /// Updates files in the index and the working tree to match the content of - /// the commit pointed at by HEAD. - pub fn checkout_head(&self, opts: Option<&mut CheckoutBuilder>) - -> Result<(), Error> { - unsafe { - let mut raw_opts = mem::zeroed(); - try_call!(raw::git_checkout_init_options(&mut raw_opts, - raw::GIT_CHECKOUT_OPTIONS_VERSION)); - if let Some(c) = opts { - c.configure(&mut raw_opts); - } - - try_call!(raw::git_checkout_head(self.raw, &raw_opts)); - } - Ok(()) - } - - /// Updates files in the working tree to match the content of the index. - /// - /// If the index is `None`, the repository's index will be used. 
- pub fn checkout_index(&self, - index: Option<&mut Index>, - opts: Option<&mut CheckoutBuilder>) -> Result<(), Error> { - unsafe { - let mut raw_opts = mem::zeroed(); - try_call!(raw::git_checkout_init_options(&mut raw_opts, - raw::GIT_CHECKOUT_OPTIONS_VERSION)); - match opts { - Some(c) => c.configure(&mut raw_opts), - None => {} - } - - try_call!(raw::git_checkout_index(self.raw, - index.map(|i| &mut *i.raw()), - &raw_opts)); - } - Ok(()) - } - - /// Updates files in the index and working tree to match the content of the - /// tree pointed at by the treeish. - pub fn checkout_tree(&self, - treeish: &Object, - opts: Option<&mut CheckoutBuilder>) -> Result<(), Error> { - unsafe { - let mut raw_opts = mem::zeroed(); - try_call!(raw::git_checkout_init_options(&mut raw_opts, - raw::GIT_CHECKOUT_OPTIONS_VERSION)); - match opts { - Some(c) => c.configure(&mut raw_opts), - None => {} - } - - try_call!(raw::git_checkout_tree(self.raw, &*treeish.raw(), - &raw_opts)); - } - Ok(()) - } - - /// Merges the given commit(s) into HEAD, writing the results into the - /// working directory. Any changes are staged for commit and any conflicts - /// are written to the index. Callers should inspect the repository's index - /// after this completes, resolve any conflicts and prepare a commit. - /// - /// For compatibility with git, the repository is put into a merging state. - /// Once the commit is done (or if the uses wishes to abort), you should - /// clear this state by calling git_repository_state_cleanup(). 
- pub fn merge(&self, - annotated_commits: &[&AnnotatedCommit], - merge_opts: Option<&mut MergeOptions>, - checkout_opts: Option<&mut CheckoutBuilder>) - -> Result<(), Error> - { - unsafe { - let mut raw_checkout_opts = mem::zeroed(); - try_call!(raw::git_checkout_init_options(&mut raw_checkout_opts, - raw::GIT_CHECKOUT_OPTIONS_VERSION)); - if let Some(c) = checkout_opts { - c.configure(&mut raw_checkout_opts); - } - - let mut commit_ptrs = annotated_commits.iter().map(|c| { - c.raw() as *const raw::git_annotated_commit - }).collect::>(); - - try_call!(raw::git_merge(self.raw, - commit_ptrs.as_mut_ptr(), - annotated_commits.len() as size_t, - merge_opts.map(|o| o.raw()) - .unwrap_or(0 as *const _), - &raw_checkout_opts)); - } - Ok(()) - } - - /// Merge two commits, producing an index that reflects the result of - /// the merge. The index may be written as-is to the working directory or - /// checked out. If the index is to be converted to a tree, the caller - /// should resolve any conflicts that arose as part of the merge. - pub fn merge_commits(&self, our_commit: &Commit, their_commit: &Commit, - opts: Option<&MergeOptions>) -> Result { - let mut raw = 0 as *mut raw::git_index; - unsafe { - try_call!(raw::git_merge_commits(&mut raw, self.raw, - our_commit.raw(), - their_commit.raw(), - opts.map(|o| o.raw()))); - Ok(Binding::from_raw(raw)) - } - } - - /// Remove all the metadata associated with an ongoing command like merge, - /// revert, cherry-pick, etc. For example: MERGE_HEAD, MERGE_MSG, etc. - pub fn cleanup_state(&self) -> Result<(), Error> { - unsafe { - try_call!(raw::git_repository_state_cleanup(self.raw)); - } - Ok(()) - } - - /// Add a note for an object - /// - /// The `notes_ref` argument is the canonical name of the reference to use, - /// defaulting to "refs/notes/commits". If `force` is specified then - /// previous notes are overwritten. 
- pub fn note(&self, - author: &Signature, - committer: &Signature, - notes_ref: Option<&str>, - oid: Oid, - note: &str, - force: bool) -> Result { - let notes_ref = try!(::opt_cstr(notes_ref)); - let note = try!(CString::new(note)); - let mut ret = raw::git_oid { id: [0; raw::GIT_OID_RAWSZ] }; - unsafe { - try_call!(raw::git_note_create(&mut ret, - self.raw, - notes_ref, - author.raw(), - committer.raw(), - oid.raw(), - note, - force)); - Ok(Binding::from_raw(&ret as *const _)) - } - } - - /// Get the default notes reference for this repository - pub fn note_default_ref(&self) -> Result { - let ret = Buf::new(); - unsafe { - try_call!(raw::git_note_default_ref(ret.raw(), self.raw)); - } - Ok(str::from_utf8(&ret).unwrap().to_string()) - } - - /// Creates a new iterator for notes in this repository. - /// - /// The `notes_ref` argument is the canonical name of the reference to use, - /// defaulting to "refs/notes/commits". - /// - /// The iterator returned yields pairs of (Oid, Oid) where the first element - /// is the id of the note and the second id is the id the note is - /// annotating. - pub fn notes(&self, notes_ref: Option<&str>) -> Result { - let notes_ref = try!(::opt_cstr(notes_ref)); - let mut ret = 0 as *mut raw::git_note_iterator; - unsafe { - try_call!(raw::git_note_iterator_new(&mut ret, self.raw, notes_ref)); - Ok(Binding::from_raw(ret)) - } - } - - /// Read the note for an object. - /// - /// The `notes_ref` argument is the canonical name of the reference to use, - /// defaulting to "refs/notes/commits". - /// - /// The id specified is the Oid of the git object to read the note from. - pub fn find_note(&self, notes_ref: Option<&str>, id: Oid) - -> Result { - let notes_ref = try!(::opt_cstr(notes_ref)); - let mut ret = 0 as *mut raw::git_note; - unsafe { - try_call!(raw::git_note_read(&mut ret, self.raw, notes_ref, - id.raw())); - Ok(Binding::from_raw(ret)) - } - } - - /// Remove the note for an object. 
- /// - /// The `notes_ref` argument is the canonical name of the reference to use, - /// defaulting to "refs/notes/commits". - /// - /// The id specified is the Oid of the git object to remove the note from. - pub fn note_delete(&self, - id: Oid, - notes_ref: Option<&str>, - author: &Signature, - committer: &Signature) -> Result<(), Error> { - let notes_ref = try!(::opt_cstr(notes_ref)); - unsafe { - try_call!(raw::git_note_remove(self.raw, notes_ref, author.raw(), - committer.raw(), id.raw())); - Ok(()) - } - } - - /// Create a revwalk that can be used to traverse the commit graph. - pub fn revwalk(&self) -> Result { - let mut raw = 0 as *mut raw::git_revwalk; - unsafe { - try_call!(raw::git_revwalk_new(&mut raw, self.raw())); - Ok(Binding::from_raw(raw)) - } - } - - /// Get the blame for a single file. - pub fn blame_file(&self, path: &Path, opts: Option<&mut BlameOptions>) - -> Result { - let path = try!(path.into_c_string()); - let mut raw = 0 as *mut raw::git_blame; - - unsafe { - try_call!(raw::git_blame_file(&mut raw, - self.raw(), - path, - opts.map(|s| s.raw()))); - Ok(Binding::from_raw(raw)) - } - } - - /// Find a merge base between two commits - pub fn merge_base(&self, one: Oid, two: Oid) -> Result { - let mut raw = raw::git_oid { id: [0; raw::GIT_OID_RAWSZ] }; - unsafe { - try_call!(raw::git_merge_base(&mut raw, self.raw, - one.raw(), two.raw())); - Ok(Binding::from_raw(&raw as *const _)) - } - } - - /// Find all merge bases between two commits - pub fn merge_bases(&self, one: Oid, two: Oid) -> Result { - let mut arr = raw::git_oidarray { - ids: 0 as *mut raw::git_oid, - count: 0, - }; - unsafe { - try_call!(raw::git_merge_bases(&mut arr, self.raw, - one.raw(), two.raw())); - Ok(Binding::from_raw(arr)) - } - } - - - /// Count the number of unique commits between two commit objects - /// - /// There is no need for branches containing the commits to have any - /// upstream relationship, but it helps to think of one as a branch and the - /// other as its 
upstream, the ahead and behind values will be what git - /// would report for the branches. - pub fn graph_ahead_behind(&self, local: Oid, upstream: Oid) - -> Result<(usize, usize), Error> { - unsafe { - let mut ahead: size_t = 0; - let mut behind: size_t = 0; - try_call!(raw::git_graph_ahead_behind(&mut ahead, &mut behind, - self.raw(), local.raw(), - upstream.raw())); - Ok((ahead as usize, behind as usize)) - } - } - - /// Determine if a commit is the descendant of another commit - pub fn graph_descendant_of(&self, commit: Oid, ancestor: Oid) - -> Result { - unsafe { - let rv = try_call!(raw::git_graph_descendant_of(self.raw(), - commit.raw(), - ancestor.raw())); - Ok(rv != 0) - } - } - - /// Read the reflog for the given reference - /// - /// If there is no reflog file for the given reference yet, an empty reflog - /// object will be returned. - pub fn reflog(&self, name: &str) -> Result { - let name = try!(CString::new(name)); - let mut ret = 0 as *mut raw::git_reflog; - unsafe { - try_call!(raw::git_reflog_read(&mut ret, self.raw, name)); - Ok(Binding::from_raw(ret)) - } - } - - /// Delete the reflog for the given reference - pub fn reflog_delete(&self, name: &str) -> Result<(), Error> { - let name = try!(CString::new(name)); - unsafe { try_call!(raw::git_reflog_delete(self.raw, name)); } - Ok(()) - } - - /// Rename a reflog - /// - /// The reflog to be renamed is expected to already exist. - pub fn reflog_rename(&self, old_name: &str, new_name: &str) - -> Result<(), Error> { - let old_name = try!(CString::new(old_name)); - let new_name = try!(CString::new(new_name)); - unsafe { - try_call!(raw::git_reflog_rename(self.raw, old_name, new_name)); - } - Ok(()) - } - - /// Check if the given reference has a reflog. - pub fn reference_has_log(&self, name: &str) -> Result { - let name = try!(CString::new(name)); - let ret = unsafe { - try_call!(raw::git_reference_has_log(self.raw, name)) - }; - Ok(ret != 0) - } - - /// Ensure that the given reference has a reflog. 
- pub fn reference_ensure_log(&self, name: &str) -> Result<(), Error> { - let name = try!(CString::new(name)); - unsafe { - try_call!(raw::git_reference_ensure_log(self.raw, name)); - } - Ok(()) - } - - /// Describes a commit - /// - /// Performs a describe operation on the current commit and the worktree. - /// After performing a describe on HEAD, a status is run and description is - /// considered to be dirty if there are. - pub fn describe(&self, opts: &DescribeOptions) -> Result { - let mut ret = 0 as *mut _; - unsafe { - try_call!(raw::git_describe_workdir(&mut ret, self.raw, opts.raw())); - Ok(Binding::from_raw(ret)) - } - } - - /// Create a diff with the difference between two tree objects. - /// - /// This is equivalent to `git diff ` - /// - /// The first tree will be used for the "old_file" side of the delta and the - /// second tree will be used for the "new_file" side of the delta. You can - /// pass `None` to indicate an empty tree, although it is an error to pass - /// `None` for both the `old_tree` and `new_tree`. - pub fn diff_tree_to_tree(&self, - old_tree: Option<&Tree>, - new_tree: Option<&Tree>, - opts: Option<&mut DiffOptions>) - -> Result { - let mut ret = 0 as *mut raw::git_diff; - unsafe { - try_call!(raw::git_diff_tree_to_tree(&mut ret, - self.raw(), - old_tree.map(|s| s.raw()), - new_tree.map(|s| s.raw()), - opts.map(|s| s.raw()))); - Ok(Binding::from_raw(ret)) - } - } - - /// Create a diff between a tree and repository index. - /// - /// This is equivalent to `git diff --cached ` or if you pass - /// the HEAD tree, then like `git diff --cached`. - /// - /// The tree you pass will be used for the "old_file" side of the delta, and - /// the index will be used for the "new_file" side of the delta. - /// - /// If you pass `None` for the index, then the existing index of the `repo` - /// will be used. In this case, the index will be refreshed from disk - /// (if it has changed) before the diff is generated. 
- /// - /// If the tree is `None`, then it is considered an empty tree. - pub fn diff_tree_to_index(&self, - old_tree: Option<&Tree>, - index: Option<&Index>, - opts: Option<&mut DiffOptions>) - -> Result { - let mut ret = 0 as *mut raw::git_diff; - unsafe { - try_call!(raw::git_diff_tree_to_index(&mut ret, - self.raw(), - old_tree.map(|s| s.raw()), - index.map(|s| s.raw()), - opts.map(|s| s.raw()))); - Ok(Binding::from_raw(ret)) - } - } - - /// Create a diff between two index objects. - /// - /// The first index will be used for the "old_file" side of the delta, and - /// the second index will be used for the "new_file" side of the delta. - pub fn diff_index_to_index(&self, - old_index: &Index, - new_index: &Index, - opts: Option<&mut DiffOptions>) - -> Result { - let mut ret = 0 as *mut raw::git_diff; - unsafe { - try_call!(raw::git_diff_index_to_index(&mut ret, - self.raw(), - old_index.raw(), - new_index.raw(), - opts.map(|s| s.raw()))); - Ok(Binding::from_raw(ret)) - } - } - - /// Create a diff between the repository index and the workdir directory. - /// - /// This matches the `git diff` command. See the note below on - /// `tree_to_workdir` for a discussion of the difference between - /// `git diff` and `git diff HEAD` and how to emulate a `git diff ` - /// using libgit2. - /// - /// The index will be used for the "old_file" side of the delta, and the - /// working directory will be used for the "new_file" side of the delta. - /// - /// If you pass `None` for the index, then the existing index of the `repo` - /// will be used. In this case, the index will be refreshed from disk - /// (if it has changed) before the diff is generated. 
- pub fn diff_index_to_workdir(&self, - index: Option<&Index>, - opts: Option<&mut DiffOptions>) - -> Result { - let mut ret = 0 as *mut raw::git_diff; - unsafe { - try_call!(raw::git_diff_index_to_workdir(&mut ret, - self.raw(), - index.map(|s| s.raw()), - opts.map(|s| s.raw()))); - Ok(Binding::from_raw(ret)) - } - } - - /// Create a diff between a tree and the working directory. - /// - /// The tree you provide will be used for the "old_file" side of the delta, - /// and the working directory will be used for the "new_file" side. - /// - /// This is not the same as `git diff ` or `git diff-index - /// `. Those commands use information from the index, whereas this - /// function strictly returns the differences between the tree and the files - /// in the working directory, regardless of the state of the index. Use - /// `tree_to_workdir_with_index` to emulate those commands. - /// - /// To see difference between this and `tree_to_workdir_with_index`, - /// consider the example of a staged file deletion where the file has then - /// been put back into the working dir and further modified. The - /// tree-to-workdir diff for that file is 'modified', but `git diff` would - /// show status 'deleted' since there is a staged delete. - /// - /// If `None` is passed for `tree`, then an empty tree is used. - pub fn diff_tree_to_workdir(&self, - old_tree: Option<&Tree>, - opts: Option<&mut DiffOptions>) - -> Result { - let mut ret = 0 as *mut raw::git_diff; - unsafe { - try_call!(raw::git_diff_tree_to_workdir(&mut ret, - self.raw(), - old_tree.map(|s| s.raw()), - opts.map(|s| s.raw()))); - Ok(Binding::from_raw(ret)) - } - } - - /// Create a diff between a tree and the working directory using index data - /// to account for staged deletes, tracked files, etc. - /// - /// This emulates `git diff ` by diffing the tree to the index and - /// the index to the working directory and blending the results into a - /// single diff that includes staged deleted, etc. 
- pub fn diff_tree_to_workdir_with_index(&self, - old_tree: Option<&Tree>, - opts: Option<&mut DiffOptions>) - -> Result { - let mut ret = 0 as *mut raw::git_diff; - unsafe { - try_call!(raw::git_diff_tree_to_workdir_with_index(&mut ret, - self.raw(), old_tree.map(|s| s.raw()), opts.map(|s| s.raw()))); - Ok(Binding::from_raw(ret)) - } - } - - /// Create a PackBuilder - pub fn packbuilder(&self) -> Result { - let mut ret = 0 as *mut raw::git_packbuilder; - unsafe { - try_call!(raw::git_packbuilder_new(&mut ret, self.raw())); - Ok(Binding::from_raw(ret)) - } - } - -} - -impl Binding for Repository { - type Raw = *mut raw::git_repository; - unsafe fn from_raw(ptr: *mut raw::git_repository) -> Repository { - Repository { raw: ptr } - } - fn raw(&self) -> *mut raw::git_repository { self.raw } -} - -impl Drop for Repository { - fn drop(&mut self) { - unsafe { raw::git_repository_free(self.raw) } - } -} - -impl RepositoryInitOptions { - /// Creates a default set of initialization options. - /// - /// By default this will set flags for creating all necessary directories - /// and initializing a directory from the user-configured templates path. - pub fn new() -> RepositoryInitOptions { - RepositoryInitOptions { - flags: raw::GIT_REPOSITORY_INIT_MKDIR as u32 | - raw::GIT_REPOSITORY_INIT_MKPATH as u32 | - raw::GIT_REPOSITORY_INIT_EXTERNAL_TEMPLATE as u32, - mode: 0, - workdir_path: None, - description: None, - template_path: None, - initial_head: None, - origin_url: None, - } - } - - /// Create a bare repository with no working directory. - /// - /// Defaults to false. - pub fn bare(&mut self, bare: bool) -> &mut RepositoryInitOptions { - self.flag(raw::GIT_REPOSITORY_INIT_BARE, bare) - } - - /// Return an error if the repository path appears to already be a git - /// repository. - /// - /// Defaults to false. 
- pub fn no_reinit(&mut self, enabled: bool) -> &mut RepositoryInitOptions { - self.flag(raw::GIT_REPOSITORY_INIT_NO_REINIT, enabled) - } - - /// Normally a '/.git/' will be appended to the repo apth for non-bare repos - /// (if it is not already there), but passing this flag prevents that - /// behavior. - /// - /// Defaults to false. - pub fn no_dotgit_dir(&mut self, enabled: bool) -> &mut RepositoryInitOptions { - self.flag(raw::GIT_REPOSITORY_INIT_NO_DOTGIT_DIR, enabled) - } - - /// Make the repo path (and workdir path) as needed. The ".git" directory - /// will always be created regardless of this flag. - /// - /// Defaults to true. - pub fn mkdir(&mut self, enabled: bool) -> &mut RepositoryInitOptions { - self.flag(raw::GIT_REPOSITORY_INIT_MKDIR, enabled) - } - - /// Recursively make all components of the repo and workdir path sas - /// necessary. - /// - /// Defaults to true. - pub fn mkpath(&mut self, enabled: bool) -> &mut RepositoryInitOptions { - self.flag(raw::GIT_REPOSITORY_INIT_MKPATH, enabled) - } - - /// Set to one of the `RepositoryInit` constants, or a custom value. - pub fn mode(&mut self, mode: RepositoryInitMode) - -> &mut RepositoryInitOptions { - self.mode = mode.bits(); - self - } - - /// Enable or disable using external templates. - /// - /// If enabled, then the `template_path` option will be queried first, then - /// `init.templatedir` from the global config, and finally - /// `/usr/share/git-core-templates` will be used (if it exists). - /// - /// Defaults to true. - pub fn external_template(&mut self, enabled: bool) - -> &mut RepositoryInitOptions { - self.flag(raw::GIT_REPOSITORY_INIT_EXTERNAL_TEMPLATE, enabled) - } - - fn flag(&mut self, flag: raw::git_repository_init_flag_t, on: bool) - -> &mut RepositoryInitOptions { - if on { - self.flags |= flag as u32; - } else { - self.flags &= !(flag as u32); - } - self - } - - /// The path do the working directory. 
- /// - /// If this is a relative path it will be evaulated relative to the repo - /// path. If this is not the "natural" working directory, a .git gitlink - /// file will be created here linking to the repo path. - pub fn workdir_path(&mut self, path: &Path) -> &mut RepositoryInitOptions { - self.workdir_path = Some(path.into_c_string().unwrap()); - self - } - - /// If set, this will be used to initialize the "description" file in the - /// repository instead of using the template content. - pub fn description(&mut self, desc: &str) -> &mut RepositoryInitOptions { - self.description = Some(CString::new(desc).unwrap()); - self - } - - /// When the `external_template` option is set, this is the first location - /// to check for the template directory. - /// - /// If this is not configured, then the default locations will be searched - /// instead. - pub fn template_path(&mut self, path: &Path) -> &mut RepositoryInitOptions { - self.template_path = Some(path.into_c_string().unwrap()); - self - } - - /// The name of the head to point HEAD at. - /// - /// If not configured, this will be treated as `master` and the HEAD ref - /// will be set to `refs/heads/master`. If this begins with `refs/` it will - /// be used verbatim; otherwise `refs/heads/` will be prefixed - pub fn initial_head(&mut self, head: &str) -> &mut RepositoryInitOptions { - self.initial_head = Some(CString::new(head).unwrap()); - self - } - - /// If set, then after the rest of the repository initialization is - /// completed an `origin` remote will be added pointing to this URL. - pub fn origin_url(&mut self, url: &str) -> &mut RepositoryInitOptions { - self.origin_url = Some(CString::new(url).unwrap()); - self - } - - /// Creates a set of raw init options to be used with - /// `git_repository_init_ext`. - /// - /// This method is unsafe as the returned value may have pointers to the - /// interior of this structure. 
- pub unsafe fn raw(&self) -> raw::git_repository_init_options { - let mut opts = mem::zeroed(); - assert_eq!(raw::git_repository_init_init_options(&mut opts, - raw::GIT_REPOSITORY_INIT_OPTIONS_VERSION), 0); - opts.flags = self.flags; - opts.mode = self.mode; - opts.workdir_path = ::call::convert(&self.workdir_path); - opts.description = ::call::convert(&self.description); - opts.template_path = ::call::convert(&self.template_path); - opts.initial_head = ::call::convert(&self.initial_head); - opts.origin_url = ::call::convert(&self.origin_url); - return opts; - } -} - -#[cfg(test)] -mod tests { - use std::ffi::OsStr; - use std::fs; - use std::path::Path; - use tempdir::TempDir; - use {Repository, Oid, ObjectType, ResetType}; - use build::CheckoutBuilder; - - #[test] - fn smoke_init() { - let td = TempDir::new("test").unwrap(); - let path = td.path(); - - let repo = Repository::init(path).unwrap(); - assert!(!repo.is_bare()); - } - - #[test] - fn smoke_init_bare() { - let td = TempDir::new("test").unwrap(); - let path = td.path(); - - let repo = Repository::init_bare(path).unwrap(); - assert!(repo.is_bare()); - assert!(repo.namespace().is_none()); - } - - #[test] - fn smoke_open() { - let td = TempDir::new("test").unwrap(); - let path = td.path(); - Repository::init(td.path()).unwrap(); - let repo = Repository::open(path).unwrap(); - assert!(!repo.is_bare()); - assert!(!repo.is_shallow()); - assert!(repo.is_empty().unwrap()); - assert_eq!(::test::realpath(&repo.path()).unwrap(), - ::test::realpath(&td.path().join(".git/")).unwrap()); - assert_eq!(repo.state(), ::RepositoryState::Clean); - } - - #[test] - fn smoke_open_bare() { - let td = TempDir::new("test").unwrap(); - let path = td.path(); - Repository::init_bare(td.path()).unwrap(); - - let repo = Repository::open(path).unwrap(); - assert!(repo.is_bare()); - assert_eq!(::test::realpath(&repo.path()).unwrap(), - ::test::realpath(&td.path().join("")).unwrap()); - } - - #[test] - fn smoke_checkout() { - let (_td, 
repo) = ::test::repo_init(); - repo.checkout_head(None).unwrap(); - } - - #[test] - fn smoke_revparse() { - let (_td, repo) = ::test::repo_init(); - let rev = repo.revparse("HEAD").unwrap(); - assert!(rev.to().is_none()); - let from = rev.from().unwrap(); - assert!(rev.from().is_some()); - - assert_eq!(repo.revparse_single("HEAD").unwrap().id(), from.id()); - let obj = repo.find_object(from.id(), None).unwrap().clone(); - obj.peel(ObjectType::Any).unwrap(); - obj.short_id().unwrap(); - repo.reset(&obj, ResetType::Hard, None).unwrap(); - let mut opts = CheckoutBuilder::new(); - t!(repo.reset(&obj, ResetType::Soft, Some(&mut opts))); - } - - #[test] - fn makes_dirs() { - let td = TempDir::new("foo").unwrap(); - Repository::init(&td.path().join("a/b/c/d")).unwrap(); - } - - #[test] - fn smoke_discover() { - let td = TempDir::new("test").unwrap(); - let subdir = td.path().join("subdi"); - fs::create_dir(&subdir).unwrap(); - Repository::init_bare(td.path()).unwrap(); - let repo = Repository::discover(&subdir).unwrap(); - assert_eq!(::test::realpath(&repo.path()).unwrap(), - ::test::realpath(&td.path().join("")).unwrap()); - } - - #[test] - fn smoke_open_ext() { - let td = TempDir::new("test").unwrap(); - let subdir = td.path().join("subdir"); - fs::create_dir(&subdir).unwrap(); - Repository::init(td.path()).unwrap(); - - let repo = Repository::open_ext(&subdir, ::RepositoryOpenFlags::empty(), &[] as &[&OsStr]).unwrap(); - assert!(!repo.is_bare()); - assert_eq!(::test::realpath(&repo.path()).unwrap(), - ::test::realpath(&td.path().join(".git")).unwrap()); - - let repo = Repository::open_ext(&subdir, ::REPOSITORY_OPEN_BARE, &[] as &[&OsStr]).unwrap(); - assert!(repo.is_bare()); - assert_eq!(::test::realpath(&repo.path()).unwrap(), - ::test::realpath(&td.path().join(".git")).unwrap()); - - let err = Repository::open_ext(&subdir, ::REPOSITORY_OPEN_NO_SEARCH, &[] as &[&OsStr]).err().unwrap(); - assert_eq!(err.code(), ::ErrorCode::NotFound); - - 
assert!(Repository::open_ext(&subdir, - ::RepositoryOpenFlags::empty(), - &[&subdir]).is_ok()); - } - - fn graph_repo_init() -> (TempDir, Repository) { - let (_td, repo) = ::test::repo_init(); - { - let head = repo.head().unwrap().target().unwrap(); - let head = repo.find_commit(head).unwrap(); - - let mut index = repo.index().unwrap(); - let id = index.write_tree().unwrap(); - - let tree = repo.find_tree(id).unwrap(); - let sig = repo.signature().unwrap(); - repo.commit(Some("HEAD"), &sig, &sig, "second", - &tree, &[&head]).unwrap(); - } - (_td, repo) - } - - #[test] - fn smoke_graph_ahead_behind() { - let (_td, repo) = graph_repo_init(); - let head = repo.head().unwrap().target().unwrap(); - let head = repo.find_commit(head).unwrap(); - let head_id = head.id(); - let head_parent_id = head.parent(0).unwrap().id(); - let (ahead, behind) = repo.graph_ahead_behind(head_id, - head_parent_id).unwrap(); - assert_eq!(ahead, 1); - assert_eq!(behind, 0); - let (ahead, behind) = repo.graph_ahead_behind(head_parent_id, - head_id).unwrap(); - assert_eq!(ahead, 0); - assert_eq!(behind, 1); - } - - #[test] - fn smoke_graph_descendant_of() { - let (_td, repo) = graph_repo_init(); - let head = repo.head().unwrap().target().unwrap(); - let head = repo.find_commit(head).unwrap(); - let head_id = head.id(); - let head_parent_id = head.parent(0).unwrap().id(); - assert!(repo.graph_descendant_of(head_id, head_parent_id).unwrap()); - assert!(!repo.graph_descendant_of(head_parent_id, head_id).unwrap()); - } - - #[test] - fn smoke_reference_has_log_ensure_log() { - let (_td, repo) = ::test::repo_init(); - - assert_eq!(repo.reference_has_log("HEAD").unwrap(), true); - assert_eq!(repo.reference_has_log("refs/heads/master").unwrap(), true); - assert_eq!(repo.reference_has_log("NOT_HEAD").unwrap(), false); - let master_oid = repo.revparse_single("master").unwrap().id(); - assert!(repo.reference("NOT_HEAD", master_oid, false, "creating a new branch").is_ok()); - 
assert_eq!(repo.reference_has_log("NOT_HEAD").unwrap(), false); - assert!(repo.reference_ensure_log("NOT_HEAD").is_ok()); - assert_eq!(repo.reference_has_log("NOT_HEAD").unwrap(), true); - } - - #[test] - fn smoke_set_head() { - let (_td, repo) = ::test::repo_init(); - - assert!(repo.set_head("refs/heads/does-not-exist").is_ok()); - assert!(repo.head().is_err()); - - assert!(repo.set_head("refs/heads/master").is_ok()); - assert!(repo.head().is_ok()); - - assert!(repo.set_head("*").is_err()); - } - - #[test] - fn smoke_set_head_detached() { - let (_td, repo) = ::test::repo_init(); - - let void_oid = Oid::from_bytes(b"00000000000000000000").unwrap(); - assert!(repo.set_head_detached(void_oid).is_err()); - - let master_oid = repo.revparse_single("master").unwrap().id(); - assert!(repo.set_head_detached(master_oid).is_ok()); - assert_eq!(repo.head().unwrap().target().unwrap(), master_oid); - } - - /// create an octopus: - /// /---o2-o4 - /// o1 X - /// \---o3-o5 - /// and checks that the merge bases of (o4,o5) are (o2,o3) - #[test] - fn smoke_merge_bases() { - let (_td, repo) = graph_repo_init(); - let sig = repo.signature().unwrap(); - - // let oid1 = head - let oid1 = repo.head().unwrap().target().unwrap(); - let commit1 = repo.find_commit(oid1).unwrap(); - println!("created oid1 {:?}", oid1); - - repo.branch("branch_a", &commit1, true).unwrap(); - repo.branch("branch_b", &commit1, true).unwrap(); - - // create commit oid2 on branchA - let mut index = repo.index().unwrap(); - let p = Path::new(repo.workdir().unwrap()).join("file_a"); - println!("using path {:?}", p); - fs::File::create(&p).unwrap(); - index.add_path(Path::new("file_a")).unwrap(); - let id_a = index.write_tree().unwrap(); - let tree_a = repo.find_tree(id_a).unwrap(); - let oid2 = repo.commit(Some("refs/heads/branch_a"), &sig, &sig, - "commit 2", &tree_a, &[&commit1]).unwrap(); - let commit2 = repo.find_commit(oid2).unwrap(); - println!("created oid2 {:?}", oid2); - - t!(repo.reset(commit1.as_object(), 
ResetType::Hard, None)); - - // create commit oid3 on branchB - let mut index = repo.index().unwrap(); - let p = Path::new(repo.workdir().unwrap()).join("file_b"); - fs::File::create(&p).unwrap(); - index.add_path(Path::new("file_b")).unwrap(); - let id_b = index.write_tree().unwrap(); - let tree_b = repo.find_tree(id_b).unwrap(); - let oid3 = repo.commit(Some("refs/heads/branch_b"), &sig, &sig, - "commit 3", &tree_b, &[&commit1]).unwrap(); - let commit3 = repo.find_commit(oid3).unwrap(); - println!("created oid3 {:?}", oid3); - - // create merge commit oid4 on branchA with parents oid2 and oid3 - //let mut index4 = repo.merge_commits(&commit2, &commit3, None).unwrap(); - repo.set_head("refs/heads/branch_a").unwrap(); - repo.checkout_head(None).unwrap(); - let oid4 = repo.commit(Some("refs/heads/branch_a"), &sig, &sig, - "commit 4", &tree_a, - &[&commit2, &commit3]).unwrap(); - //index4.write_tree_to(&repo).unwrap(); - println!("created oid4 {:?}", oid4); - - // create merge commit oid5 on branchB with parents oid2 and oid3 - //let mut index5 = repo.merge_commits(&commit3, &commit2, None).unwrap(); - repo.set_head("refs/heads/branch_b").unwrap(); - repo.checkout_head(None).unwrap(); - let oid5 = repo.commit(Some("refs/heads/branch_b"), &sig, &sig, - "commit 5", &tree_a, - &[&commit3, &commit2]).unwrap(); - //index5.write_tree_to(&repo).unwrap(); - println!("created oid5 {:?}", oid5); - - // merge bases of (oid4,oid5) should be (oid2,oid3) - let merge_bases = repo.merge_bases(oid4, oid5).unwrap(); - let mut found_oid2 = false; - let mut found_oid3 = false; - for mg in merge_bases.iter() { - println!("found merge base {:?}", mg); - if mg == &oid2 { - found_oid2 = true; - } else if mg == &oid3 { - found_oid3 = true; - } else { - assert!(false); - } - } - assert!(found_oid2); - assert!(found_oid3); - assert_eq!(merge_bases.len(), 2); - } - - #[test] - fn smoke_revparse_ext() { - let (_td, repo) = graph_repo_init(); - - { - let short_refname = "master"; - let 
expected_refname = "refs/heads/master"; - let (obj, reference) = repo.revparse_ext(short_refname).unwrap(); - let expected_obj = repo.revparse_single(expected_refname).unwrap(); - assert_eq!(obj.id(), expected_obj.id()); - assert_eq!(reference.unwrap().name().unwrap(), expected_refname); - } - { - let missing_refname = "refs/heads/does-not-exist"; - assert!(repo.revparse_ext(missing_refname).is_err()); - } - { - let (_obj, reference) = repo.revparse_ext("HEAD^").unwrap(); - assert!(reference.is_none()); - } - } -} diff -Nru cargo-0.17.0/vendor/git2-0.6.3/src/revspec.rs cargo-0.19.0/vendor/git2-0.6.3/src/revspec.rs --- cargo-0.17.0/vendor/git2-0.6.3/src/revspec.rs 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/git2-0.6.3/src/revspec.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,26 +0,0 @@ -use {Object, RevparseMode}; - -/// A revspec represents a range of revisions within a repository. -pub struct Revspec<'repo> { - from: Option>, - to: Option>, - mode: RevparseMode, -} - -impl<'repo> Revspec<'repo> { - /// Assembles a new revspec from the from/to components. - pub fn from_objects(from: Option>, - to: Option>, - mode: RevparseMode) -> Revspec<'repo> { - Revspec { from: from, to: to, mode: mode } - } - - /// Access the `from` range of this revspec. - pub fn from(&self) -> Option<&Object<'repo>> { self.from.as_ref() } - - /// Access the `to` range of this revspec. - pub fn to(&self) -> Option<&Object<'repo>> { self.to.as_ref() } - - /// Returns the intent of the revspec. 
- pub fn mode(&self) -> RevparseMode { self.mode } -} diff -Nru cargo-0.17.0/vendor/git2-0.6.3/src/revwalk.rs cargo-0.19.0/vendor/git2-0.6.3/src/revwalk.rs --- cargo-0.17.0/vendor/git2-0.6.3/src/revwalk.rs 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/git2-0.6.3/src/revwalk.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,203 +0,0 @@ -use std::marker; -use std::ffi::CString; -use libc::c_uint; - -use {raw, Error, Sort, Oid, Repository}; -use util::Binding; - -/// A revwalk allows traversal of the commit graph defined by including one or -/// more leaves and excluding one or more roots. -pub struct Revwalk<'repo> { - raw: *mut raw::git_revwalk, - _marker: marker::PhantomData<&'repo Repository>, -} - -impl<'repo> Revwalk<'repo> { - /// Reset a revwalk to allow re-configuring it. - /// - /// The revwalk is automatically reset when iteration of its commits - /// completes. - pub fn reset(&mut self) { - unsafe { raw::git_revwalk_reset(self.raw()) } - } - - /// Set the order in which commits are visited. - pub fn set_sorting(&mut self, sort_mode: Sort) { - unsafe { - raw::git_revwalk_sorting(self.raw(), sort_mode.bits() as c_uint) - } - } - - /// Simplify the history by first-parent - /// - /// No parents other than the first for each commit will be enqueued. - pub fn simplify_first_parent(&mut self) { - unsafe { raw::git_revwalk_simplify_first_parent(self.raw) } - } - - /// Mark a commit to start traversal from. - /// - /// The given OID must belong to a committish on the walked repository. - /// - /// The given commit will be used as one of the roots when starting the - /// revision walk. At least one commit must be pushed onto the walker before - /// a walk can be started. - pub fn push(&mut self, oid: Oid) -> Result<(), Error> { - unsafe { - try_call!(raw::git_revwalk_push(self.raw(), oid.raw())); - } - Ok(()) - } - - /// Push the repository's HEAD - /// - /// For more information, see `push`. 
- pub fn push_head(&mut self) -> Result<(), Error> { - unsafe { - try_call!(raw::git_revwalk_push_head(self.raw())); - } - Ok(()) - } - - /// Push matching references - /// - /// The OIDs pointed to by the references that match the given glob pattern - /// will be pushed to the revision walker. - /// - /// A leading 'refs/' is implied if not present as well as a trailing `/ \ - /// *` if the glob lacks '?', ' \ *' or '['. - /// - /// Any references matching this glob which do not point to a committish - /// will be ignored. - pub fn push_glob(&mut self, glob: &str) -> Result<(), Error> { - let glob = try!(CString::new(glob)); - unsafe { - try_call!(raw::git_revwalk_push_glob(self.raw, glob)); - } - Ok(()) - } - - /// Push and hide the respective endpoints of the given range. - /// - /// The range should be of the form `..` where each - /// `` is in the form accepted by `revparse_single`. The left-hand - /// commit will be hidden and the right-hand commit pushed. - pub fn push_range(&mut self, range: &str) -> Result<(), Error> { - let range = try!(CString::new(range)); - unsafe { - try_call!(raw::git_revwalk_push_range(self.raw, range)); - } - Ok(()) - } - - /// Push the OID pointed to by a reference - /// - /// The reference must point to a committish. - pub fn push_ref(&mut self, reference: &str) -> Result<(), Error> { - let reference = try!(CString::new(reference)); - unsafe { - try_call!(raw::git_revwalk_push_ref(self.raw, reference)); - } - Ok(()) - } - - /// Mark a commit as not of interest to this revwalk. - pub fn hide(&mut self, oid: Oid) -> Result<(), Error> { - unsafe { - try_call!(raw::git_revwalk_hide(self.raw(), oid.raw())); - } - Ok(()) - } - - /// Hide the repository's HEAD - /// - /// For more information, see `hide`. - pub fn hide_head(&mut self) -> Result<(), Error> { - unsafe { - try_call!(raw::git_revwalk_hide_head(self.raw())); - } - Ok(()) - } - - /// Hide matching references. 
- /// - /// The OIDs pointed to by the references that match the given glob pattern - /// and their ancestors will be hidden from the output on the revision walk. - /// - /// A leading 'refs/' is implied if not present as well as a trailing `/ \ - /// *` if the glob lacks '?', ' \ *' or '['. - /// - /// Any references matching this glob which do not point to a committish - /// will be ignored. - pub fn hide_glob(&mut self, glob: &str) -> Result<(), Error> { - let glob = try!(CString::new(glob)); - unsafe { - try_call!(raw::git_revwalk_hide_glob(self.raw, glob)); - } - Ok(()) - } - - /// Hide the OID pointed to by a reference. - /// - /// The reference must point to a committish. - pub fn hide_ref(&mut self, reference: &str) -> Result<(), Error> { - let reference = try!(CString::new(reference)); - unsafe { - try_call!(raw::git_revwalk_hide_ref(self.raw, reference)); - } - Ok(()) - } -} - -impl<'repo> Binding for Revwalk<'repo> { - type Raw = *mut raw::git_revwalk; - unsafe fn from_raw(raw: *mut raw::git_revwalk) -> Revwalk<'repo> { - Revwalk { raw: raw, _marker: marker::PhantomData } - } - fn raw(&self) -> *mut raw::git_revwalk { self.raw } -} - -impl<'repo> Drop for Revwalk<'repo> { - fn drop(&mut self) { - unsafe { raw::git_revwalk_free(self.raw) } - } -} - -impl<'repo> Iterator for Revwalk<'repo> { - type Item = Result; - fn next(&mut self) -> Option> { - let mut out: raw::git_oid = raw::git_oid{ id: [0; raw::GIT_OID_RAWSZ] }; - unsafe { - try_call_iter!(raw::git_revwalk_next(&mut out, self.raw())); - Some(Ok(Binding::from_raw(&out as *const _))) - } - } -} - -#[cfg(test)] -mod tests { - #[test] - fn smoke() { - let (_td, repo) = ::test::repo_init(); - let head = repo.head().unwrap(); - let target = head.target().unwrap(); - - let mut walk = repo.revwalk().unwrap(); - walk.push(target).unwrap(); - - let oids: Vec<::Oid> = walk.by_ref().collect::, _>>() - .unwrap(); - - assert_eq!(oids.len(), 1); - assert_eq!(oids[0], target); - - walk.reset(); - 
walk.push_head().unwrap(); - assert_eq!(walk.by_ref().count(), 1); - - walk.reset(); - walk.push_head().unwrap(); - walk.hide_head().unwrap(); - assert_eq!(walk.by_ref().count(), 0); - } -} diff -Nru cargo-0.17.0/vendor/git2-0.6.3/src/signature.rs cargo-0.19.0/vendor/git2-0.6.3/src/signature.rs --- cargo-0.17.0/vendor/git2-0.6.3/src/signature.rs 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/git2-0.6.3/src/signature.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,174 +0,0 @@ -use std::ffi::CString; -use std::marker; -use std::mem; -use std::str; -use std::fmt; -use libc; - -use {raw, Error, Time}; -use util::Binding; - -/// A Signature is used to indicate authorship of various actions throughout the -/// library. -/// -/// Signatures contain a name, email, and timestamp. All fields can be specified -/// with `new` while the `now` constructor omits the timestamp. The -/// [`Repository::signature`] method can be used to create a default signature -/// with name and email values read from the configuration. -/// -/// [`Repository::signature`]: struct.Repository.html#method.signature -pub struct Signature<'a> { - raw: *mut raw::git_signature, - _marker: marker::PhantomData<&'a str>, - owned: bool, -} - -impl<'a> Signature<'a> { - /// Create a new action signature with a timestamp of 'now'. - /// - /// See `new` for more information - pub fn now(name: &str, email: &str) -> Result, Error> { - ::init(); - let mut ret = 0 as *mut raw::git_signature; - let name = try!(CString::new(name)); - let email = try!(CString::new(email)); - unsafe { - try_call!(raw::git_signature_now(&mut ret, name, email)); - Ok(Binding::from_raw(ret)) - } - } - - /// Create a new action signature. - /// - /// The `time` specified is in seconds since the epoch, and the `offset` is - /// the time zone offset in minutes. - /// - /// Returns error if either `name` or `email` contain angle brackets. 
- pub fn new(name: &str, email: &str, time: &Time) - -> Result, Error> { - ::init(); - let mut ret = 0 as *mut raw::git_signature; - let name = try!(CString::new(name)); - let email = try!(CString::new(email)); - unsafe { - try_call!(raw::git_signature_new(&mut ret, name, email, - time.seconds() as raw::git_time_t, - time.offset_minutes() as libc::c_int)); - Ok(Binding::from_raw(ret)) - } - } - - /// Gets the name on the signature. - /// - /// Returns `None` if the name is not valid utf-8 - pub fn name(&self) -> Option<&str> { - str::from_utf8(self.name_bytes()).ok() - } - - /// Gets the name on the signature as a byte slice. - pub fn name_bytes(&self) -> &[u8] { - unsafe { ::opt_bytes(self, (*self.raw).name).unwrap() } - } - - /// Gets the email on the signature. - /// - /// Returns `None` if the email is not valid utf-8 - pub fn email(&self) -> Option<&str> { - str::from_utf8(self.email_bytes()).ok() - } - - /// Gets the email on the signature as a byte slice. - pub fn email_bytes(&self) -> &[u8] { - unsafe { ::opt_bytes(self, (*self.raw).email).unwrap() } - } - - /// Get the `when` of this signature. - pub fn when(&self) -> Time { - unsafe { Binding::from_raw((*self.raw).when) } - } - - /// Convert a signature of any lifetime into an owned signature with a - /// static lifetime. - pub fn to_owned(&self) -> Signature<'static> { - unsafe { - let me = mem::transmute::<&Signature<'a>, &Signature<'static>>(self); - me.clone() - } - } -} - -impl<'a> Binding for Signature<'a> { - type Raw = *mut raw::git_signature; - unsafe fn from_raw(raw: *mut raw::git_signature) -> Signature<'a> { - Signature { - raw: raw, - _marker: marker::PhantomData, - owned: true, - } - } - fn raw(&self) -> *mut raw::git_signature { self.raw } -} - -/// Creates a new signature from the give raw pointer, tied to the lifetime -/// of the given object. -/// -/// This function is unsafe as there is no guarantee that `raw` is valid for -/// `'a` nor if it's a valid pointer. 
-pub unsafe fn from_raw_const<'b, T>(_lt: &'b T, - raw: *const raw::git_signature) - -> Signature<'b> { - Signature { - raw: raw as *mut raw::git_signature, - _marker: marker::PhantomData, - owned: false, - } -} - -impl Clone for Signature<'static> { - fn clone(&self) -> Signature<'static> { - // TODO: can this be defined for 'a and just do a plain old copy if the - // lifetime isn't static? - let mut raw = 0 as *mut raw::git_signature; - let rc = unsafe { raw::git_signature_dup(&mut raw, &*self.raw) }; - assert_eq!(rc, 0); - unsafe { Binding::from_raw(raw) } - } -} - -impl<'a> Drop for Signature<'a> { - fn drop(&mut self) { - if self.owned { - unsafe { raw::git_signature_free(self.raw) } - } - } -} - -impl<'a> fmt::Display for Signature<'a> { - - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "{} <{}>", - String::from_utf8_lossy(self.name_bytes()), - String::from_utf8_lossy(self.email_bytes())) - } - -} - -#[cfg(test)] -mod tests { - use {Signature, Time}; - - #[test] - fn smoke() { - Signature::new("foo", "bar", &Time::new(89, 0)).unwrap(); - Signature::now("foo", "bar").unwrap(); - assert!(Signature::new("", "bar", &Time::new(89, 0)).is_err()); - assert!(Signature::now("", "bar").is_err()); - - let s = Signature::now("foo", "bar").unwrap(); - assert_eq!(s.name(), Some("foo")); - assert_eq!(s.email(), Some("bar")); - - drop(s.clone()); - drop(s.to_owned()); - } -} diff -Nru cargo-0.17.0/vendor/git2-0.6.3/src/status.rs cargo-0.19.0/vendor/git2-0.6.3/src/status.rs --- cargo-0.17.0/vendor/git2-0.6.3/src/status.rs 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/git2-0.6.3/src/status.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,396 +0,0 @@ -use std::ffi::CString; -use std::ops::Range; -use std::marker; -use std::mem; -use std::str; -use libc::{c_char, size_t, c_uint}; - -use {raw, Status, DiffDelta, IntoCString, Repository}; -use util::Binding; - -/// Options that can be provided to `repo.statuses()` to control how the status -/// 
information is gathered. -pub struct StatusOptions { - raw: raw::git_status_options, - pathspec: Vec, - ptrs: Vec<*const c_char>, -} - -/// Enumeration of possible methods of what can be shown through a status -/// operation. -#[derive(Copy, Clone)] -pub enum StatusShow { - /// Only gives status based on HEAD to index comparison, not looking at - /// working directory changes. - Index, - - /// Only gives status based on index to working directory comparison, not - /// comparing the index to the HEAD. - Workdir, - - /// The default, this roughly matches `git status --porcelain` regarding - /// which files are included and in what order. - IndexAndWorkdir, -} - -/// A container for a list of status information about a repository. -/// -/// Each instances appears as a if it were a collection, having a length and -/// allowing indexing as well as provding an iterator. -pub struct Statuses<'repo> { - raw: *mut raw::git_status_list, - - // Hm, not currently present, but can't hurt? - _marker: marker::PhantomData<&'repo Repository>, -} - -/// An iterator over the statuses in a `Statuses` instance. -pub struct StatusIter<'statuses> { - statuses: &'statuses Statuses<'statuses>, - range: Range, -} - -/// A structure representing an entry in the `Statuses` structure. -/// -/// Instances are created through the `.iter()` method or the `.get()` method. -pub struct StatusEntry<'statuses> { - raw: *const raw::git_status_entry, - _marker: marker::PhantomData<&'statuses DiffDelta<'statuses>>, -} - -impl StatusOptions { - /// Creates a new blank set of status options. - pub fn new() -> StatusOptions { - unsafe { - let mut raw = mem::zeroed(); - let r = raw::git_status_init_options(&mut raw, - raw::GIT_STATUS_OPTIONS_VERSION); - assert_eq!(r, 0); - StatusOptions { - raw: raw, - pathspec: Vec::new(), - ptrs: Vec::new(), - } - } - } - - /// Select the files on which to report status. - /// - /// The default, if unspecified, is to show the index and the working - /// directory. 
- pub fn show(&mut self, show: StatusShow) -> &mut StatusOptions { - self.raw.show = match show { - StatusShow::Index => raw::GIT_STATUS_SHOW_INDEX_ONLY, - StatusShow::Workdir => raw::GIT_STATUS_SHOW_WORKDIR_ONLY, - StatusShow::IndexAndWorkdir => raw::GIT_STATUS_SHOW_INDEX_AND_WORKDIR, - }; - self - } - - /// Add a path pattern to match (using fnmatch-style matching). - /// - /// If the `disable_pathspec_match` option is given, then this is a literal - /// path to match. If this is not called, then there will be no patterns to - /// match and the entire directory will be used. - pub fn pathspec(&mut self, pathspec: T) - -> &mut StatusOptions { - let s = pathspec.into_c_string().unwrap(); - self.ptrs.push(s.as_ptr()); - self.pathspec.push(s); - self - } - - fn flag(&mut self, flag: raw::git_status_opt_t, val: bool) - -> &mut StatusOptions { - if val { - self.raw.flags |= flag as c_uint; - } else { - self.raw.flags &= !(flag as c_uint); - } - self - } - - /// Flag whether untracked files will be included. - /// - /// Untracked files will only be included if the workdir files are included - /// in the status "show" option. - pub fn include_untracked(&mut self, include: bool) -> &mut StatusOptions { - self.flag(raw::GIT_STATUS_OPT_INCLUDE_UNTRACKED, include) - } - - /// Flag whether ignored files will be included. - /// - /// The files will only be included if the workdir files are included - /// in the status "show" option. - pub fn include_ignored(&mut self, include: bool) -> &mut StatusOptions { - self.flag(raw::GIT_STATUS_OPT_INCLUDE_IGNORED, include) - } - - /// Flag to include unmodified files. - pub fn include_unmodified(&mut self, include: bool) -> &mut StatusOptions { - self.flag(raw::GIT_STATUS_OPT_INCLUDE_UNMODIFIED, include) - } - - /// Flag that submodules should be skipped. - /// - /// This only applies if there are no pending typechanges to the submodule - /// (either from or to another type). 
- pub fn exclude_submodules(&mut self, exclude: bool) -> &mut StatusOptions { - self.flag(raw::GIT_STATUS_OPT_EXCLUDE_SUBMODULES, exclude) - } - - /// Flag that all files in untracked directories should be included. - /// - /// Normally if an entire directory is new then just the top-level directory - /// is included (with a trailing slash on the entry name). - pub fn recurse_untracked_dirs(&mut self, include: bool) - -> &mut StatusOptions { - self.flag(raw::GIT_STATUS_OPT_RECURSE_UNTRACKED_DIRS, include) - } - - /// Indicates that the given paths should be treated as literals paths, note - /// patterns. - pub fn disable_pathspec_match(&mut self, include: bool) - -> &mut StatusOptions { - self.flag(raw::GIT_STATUS_OPT_DISABLE_PATHSPEC_MATCH, include) - } - - /// Indicates that the contents of ignored directories should be included in - /// the status. - pub fn recurse_ignored_dirs(&mut self, include: bool) - -> &mut StatusOptions { - self.flag(raw::GIT_STATUS_OPT_RECURSE_IGNORED_DIRS, include) - } - - /// Indicates that rename detection should be processed between the head. - pub fn renames_head_to_index(&mut self, include: bool) - -> &mut StatusOptions { - self.flag(raw::GIT_STATUS_OPT_RENAMES_HEAD_TO_INDEX, include) - } - - /// Indicates that rename detection should be run between the index and the - /// working directory. - pub fn renames_index_to_workdir(&mut self, include: bool) - -> &mut StatusOptions { - self.flag(raw::GIT_STATUS_OPT_RENAMES_INDEX_TO_WORKDIR, include) - } - - /// Override the native case sensitivity for the file system and force the - /// output to be in case sensitive order. - pub fn sort_case_sensitively(&mut self, include: bool) - -> &mut StatusOptions { - self.flag(raw::GIT_STATUS_OPT_SORT_CASE_SENSITIVELY, include) - } - - /// Override the native case sensitivity for the file system and force the - /// output to be in case-insensitive order. 
- pub fn sort_case_insensitively(&mut self, include: bool) - -> &mut StatusOptions { - self.flag(raw::GIT_STATUS_OPT_SORT_CASE_INSENSITIVELY, include) - } - - /// Indicates that rename detection should include rewritten files. - pub fn renames_from_rewrites(&mut self, include: bool) - -> &mut StatusOptions { - self.flag(raw::GIT_STATUS_OPT_RENAMES_FROM_REWRITES, include) - } - - /// Bypasses the default status behavior of doing a "soft" index reload. - pub fn no_refresh(&mut self, include: bool) -> &mut StatusOptions { - self.flag(raw::GIT_STATUS_OPT_NO_REFRESH, include) - } - - /// Refresh the stat cache in the index for files are unchanged but have - /// out of date stat information in the index. - /// - /// This will result in less work being done on subsequent calls to fetching - /// the status. - pub fn update_index(&mut self, include: bool) -> &mut StatusOptions { - self.flag(raw::GIT_STATUS_OPT_UPDATE_INDEX, include) - } - - // erm... - #[allow(missing_docs)] - pub fn include_unreadable(&mut self, include: bool) -> &mut StatusOptions { - self.flag(raw::GIT_STATUS_OPT_INCLUDE_UNREADABLE, include) - } - - // erm... - #[allow(missing_docs)] - pub fn include_unreadable_as_untracked(&mut self, include: bool) - -> &mut StatusOptions { - self.flag(raw::GIT_STATUS_OPT_INCLUDE_UNREADABLE_AS_UNTRACKED, include) - } - - /// Get a pointer to the inner list of status options. - /// - /// This function is unsafe as the returned structure has interior pointers - /// and may no longer be valid if these options continue to be mutated. - pub unsafe fn raw(&mut self) -> *const raw::git_status_options { - self.raw.pathspec.strings = self.ptrs.as_ptr() as *mut _; - self.raw.pathspec.count = self.ptrs.len() as size_t; - &self.raw - } -} - -impl<'repo> Statuses<'repo> { - /// Gets a status entry from this list at the specified index. - /// - /// Returns `None` if the index is out of bounds. 
- pub fn get(&self, index: usize) -> Option { - unsafe { - let p = raw::git_status_byindex(self.raw, index as size_t); - Binding::from_raw_opt(p) - } - } - - /// Gets the count of status entries in this list. - /// - /// If there are no changes in status (at least according the options given - /// when the status list was created), this can return 0. - pub fn len(&self) -> usize { - unsafe { raw::git_status_list_entrycount(self.raw) as usize } - } - - /// Returns an iterator over the statuses in this list. - pub fn iter(&self) -> StatusIter { - StatusIter { - statuses: self, - range: 0..self.len(), - } - } -} - -impl<'repo> Binding for Statuses<'repo> { - type Raw = *mut raw::git_status_list; - unsafe fn from_raw(raw: *mut raw::git_status_list) -> Statuses<'repo> { - Statuses { raw: raw, _marker: marker::PhantomData } - } - fn raw(&self) -> *mut raw::git_status_list { self.raw } -} - -impl<'repo> Drop for Statuses<'repo> { - fn drop(&mut self) { - unsafe { raw::git_status_list_free(self.raw); } - } -} - -impl<'a> Iterator for StatusIter<'a> { - type Item = StatusEntry<'a>; - fn next(&mut self) -> Option> { - self.range.next().and_then(|i| self.statuses.get(i)) - } - fn size_hint(&self) -> (usize, Option) { self.range.size_hint() } -} -impl<'a> DoubleEndedIterator for StatusIter<'a> { - fn next_back(&mut self) -> Option> { - self.range.next_back().and_then(|i| self.statuses.get(i)) - } -} -impl<'a> ExactSizeIterator for StatusIter<'a> {} - -impl<'statuses> StatusEntry<'statuses> { - /// Access the bytes for this entry's corresponding pathname - pub fn path_bytes(&self) -> &[u8] { - unsafe { - if (*self.raw).head_to_index.is_null() { - ::opt_bytes(self, (*(*self.raw).index_to_workdir).old_file.path) - } else { - ::opt_bytes(self, (*(*self.raw).head_to_index).old_file.path) - }.unwrap() - } - } - - /// Access this entry's path name as a string. - /// - /// Returns `None` if the path is not valid utf-8. 
- pub fn path(&self) -> Option<&str> { str::from_utf8(self.path_bytes()).ok() } - - /// Access the status flags for this file - pub fn status(&self) -> Status { - Status::from_bits_truncate(unsafe { (*self.raw).status as u32 }) - } - - /// Access detailed information about the differences between the file in - /// HEAD and the file in the index. - pub fn head_to_index(&self) -> Option> { - unsafe { - Binding::from_raw_opt((*self.raw).head_to_index) - } - } - - /// Access detailed information about the differences between the file in - /// the index and the file in the working directory. - pub fn index_to_workdir(&self) -> Option> { - unsafe { - Binding::from_raw_opt((*self.raw).index_to_workdir) - } - } -} - -impl<'statuses> Binding for StatusEntry<'statuses> { - type Raw = *const raw::git_status_entry; - - unsafe fn from_raw(raw: *const raw::git_status_entry) - -> StatusEntry<'statuses> { - StatusEntry { raw: raw, _marker: marker::PhantomData } - } - fn raw(&self) -> *const raw::git_status_entry { self.raw } -} - -#[cfg(test)] -mod tests { - use std::fs::File; - use std::path::Path; - use std::io::prelude::*; - use super::StatusOptions; - - #[test] - fn smoke() { - let (td, repo) = ::test::repo_init(); - assert_eq!(repo.statuses(None).unwrap().len(), 0); - File::create(&td.path().join("foo")).unwrap(); - let statuses = repo.statuses(None).unwrap(); - assert_eq!(statuses.iter().count(), 1); - let status = statuses.iter().next().unwrap(); - assert_eq!(status.path(), Some("foo")); - assert!(status.status().contains(::STATUS_WT_NEW)); - assert!(!status.status().contains(::STATUS_INDEX_NEW)); - assert!(status.head_to_index().is_none()); - let diff = status.index_to_workdir().unwrap(); - assert_eq!(diff.old_file().path_bytes().unwrap(), b"foo"); - assert_eq!(diff.new_file().path_bytes().unwrap(), b"foo"); - } - - #[test] - fn filter() { - let (td, repo) = ::test::repo_init(); - t!(File::create(&td.path().join("foo"))); - t!(File::create(&td.path().join("bar"))); - let 
mut opts = StatusOptions::new(); - opts.include_untracked(true) - .pathspec("foo"); - - let statuses = t!(repo.statuses(Some(&mut opts))); - assert_eq!(statuses.iter().count(), 1); - let status = statuses.iter().next().unwrap(); - assert_eq!(status.path(), Some("foo")); - } - - #[test] - fn gitignore() { - let (td, repo) = ::test::repo_init(); - t!(t!(File::create(td.path().join(".gitignore"))).write_all(b"foo\n")); - assert!(!t!(repo.status_should_ignore(Path::new("bar")))); - assert!(t!(repo.status_should_ignore(Path::new("foo")))); - } - - #[test] - fn status_file() { - let (td, repo) = ::test::repo_init(); - assert!(repo.status_file(Path::new("foo")).is_err()); - t!(File::create(td.path().join("foo"))); - let status = t!(repo.status_file(Path::new("foo"))); - assert!(status.contains(::STATUS_WT_NEW)); - } -} diff -Nru cargo-0.17.0/vendor/git2-0.6.3/src/string_array.rs cargo-0.19.0/vendor/git2-0.6.3/src/string_array.rs --- cargo-0.17.0/vendor/git2-0.6.3/src/string_array.rs 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/git2-0.6.3/src/string_array.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,106 +0,0 @@ -//! Bindings to libgit2's raw git_strarray type - -use std::str; -use std::ops::Range; - -use raw; -use util::Binding; - -/// A string array structure used by libgit2 -/// -/// Some apis return arrays of strings which originate from libgit2. This -/// wrapper type behaves a little like `Vec<&str>` but does so without copying -/// the underlying strings until necessary. -pub struct StringArray { - raw: raw::git_strarray, -} - -/// A forward iterator over the strings of an array, casted to `&str`. -pub struct Iter<'a> { - range: Range, - arr: &'a StringArray, -} - -/// A forward iterator over the strings of an array, casted to `&[u8]`. -pub struct IterBytes<'a> { - range: Range, - arr: &'a StringArray, -} - -impl StringArray { - /// Returns None if the i'th string is not utf8 or if i is out of bounds. 
- pub fn get(&self, i: usize) -> Option<&str> { - self.get_bytes(i).and_then(|s| str::from_utf8(s).ok()) - } - - /// Returns None if `i` is out of bounds. - pub fn get_bytes(&self, i: usize) -> Option<&[u8]> { - if i < self.raw.count as usize { - unsafe { - let ptr = *self.raw.strings.offset(i as isize) as *const _; - Some(::opt_bytes(self, ptr).unwrap()) - } - } else { - None - } - } - - /// Returns an iterator over the strings contained within this array. - /// - /// The iterator yields `Option<&str>` as it is unknown whether the contents - /// are utf-8 or not. - pub fn iter(&self) -> Iter { - Iter { range: 0..self.len(), arr: self } - } - - /// Returns an iterator over the strings contained within this array, - /// yielding byte slices. - pub fn iter_bytes(&self) -> IterBytes { - IterBytes { range: 0..self.len(), arr: self } - } - - /// Returns the number of strings in this array. - pub fn len(&self) -> usize { self.raw.count as usize } -} - -impl Binding for StringArray { - type Raw = raw::git_strarray; - unsafe fn from_raw(raw: raw::git_strarray) -> StringArray { - StringArray { raw: raw } - } - fn raw(&self) -> raw::git_strarray { self.raw } -} - -impl<'a> Iterator for Iter<'a> { - type Item = Option<&'a str>; - fn next(&mut self) -> Option> { - self.range.next().map(|i| self.arr.get(i)) - } - fn size_hint(&self) -> (usize, Option) { self.range.size_hint() } -} -impl<'a> DoubleEndedIterator for Iter<'a> { - fn next_back(&mut self) -> Option> { - self.range.next_back().map(|i| self.arr.get(i)) - } -} -impl<'a> ExactSizeIterator for Iter<'a> {} - -impl<'a> Iterator for IterBytes<'a> { - type Item = &'a [u8]; - fn next(&mut self) -> Option<&'a [u8]> { - self.range.next().and_then(|i| self.arr.get_bytes(i)) - } - fn size_hint(&self) -> (usize, Option) { self.range.size_hint() } -} -impl<'a> DoubleEndedIterator for IterBytes<'a> { - fn next_back(&mut self) -> Option<&'a [u8]> { - self.range.next_back().and_then(|i| self.arr.get_bytes(i)) - } -} -impl<'a> 
ExactSizeIterator for IterBytes<'a> {} - -impl Drop for StringArray { - fn drop(&mut self) { - unsafe { raw::git_strarray_free(&mut self.raw) } - } -} diff -Nru cargo-0.17.0/vendor/git2-0.6.3/src/submodule.rs cargo-0.19.0/vendor/git2-0.6.3/src/submodule.rs --- cargo-0.17.0/vendor/git2-0.6.3/src/submodule.rs 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/git2-0.6.3/src/submodule.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,233 +0,0 @@ -use std::marker; -use std::str; -use std::path::Path; - -use {raw, Oid, Repository, Error}; -use util::{self, Binding}; - -/// A structure to represent a git [submodule][1] -/// -/// [1]: http://git-scm.com/book/en/Git-Tools-Submodules -pub struct Submodule<'repo> { - raw: *mut raw::git_submodule, - _marker: marker::PhantomData<&'repo Repository>, -} - -impl<'repo> Submodule<'repo> { - /// Get the submodule's branch. - /// - /// Returns `None` if the branch is not valid utf-8 or if the branch is not - /// yet available. - pub fn branch(&self) -> Option<&str> { - self.branch_bytes().and_then(|s| str::from_utf8(s).ok()) - } - - /// Get the branch for the submodule. - /// - /// Returns `None` if the branch is not yet available. - pub fn branch_bytes(&self) -> Option<&[u8]> { - unsafe { - ::opt_bytes(self, raw::git_submodule_branch(self.raw)) - } - } - - /// Get the submodule's url. - /// - /// Returns `None` if the url is not valid utf-8 - pub fn url(&self) -> Option<&str> { str::from_utf8(self.url_bytes()).ok() } - - /// Get the url for the submodule. - pub fn url_bytes(&self) -> &[u8] { - unsafe { - ::opt_bytes(self, raw::git_submodule_url(self.raw)).unwrap() - } - } - - /// Get the submodule's name. - /// - /// Returns `None` if the name is not valid utf-8 - pub fn name(&self) -> Option<&str> { str::from_utf8(self.name_bytes()).ok() } - - /// Get the name for the submodule. 
- pub fn name_bytes(&self) -> &[u8] { - unsafe { - ::opt_bytes(self, raw::git_submodule_name(self.raw)).unwrap() - } - } - - /// Get the path for the submodule. - pub fn path(&self) -> &Path { - util::bytes2path(unsafe { - ::opt_bytes(self, raw::git_submodule_path(self.raw)).unwrap() - }) - } - - /// Get the OID for the submodule in the current HEAD tree. - pub fn head_id(&self) -> Option { - unsafe { - Binding::from_raw_opt(raw::git_submodule_head_id(self.raw)) - } - } - - /// Get the OID for the submodule in the index. - pub fn index_id(&self) -> Option { - unsafe { - Binding::from_raw_opt(raw::git_submodule_index_id(self.raw)) - } - } - - /// Get the OID for the submodule in the current working directory. - /// - /// This returns the OID that corresponds to looking up 'HEAD' in the - /// checked out submodule. If there are pending changes in the index or - /// anything else, this won't notice that. - pub fn workdir_id(&self) -> Option { - unsafe { - Binding::from_raw_opt(raw::git_submodule_wd_id(self.raw)) - } - } - - /// Copy submodule info into ".git/config" file. - /// - /// Just like "git submodule init", this copies information about the - /// submodule into ".git/config". You can use the accessor functions above - /// to alter the in-memory git_submodule object and control what is written - /// to the config, overriding what is in .gitmodules. - /// - /// By default, existing entries will not be overwritten, but passing `true` - /// for `overwrite` forces them to be updated. - pub fn init(&mut self, overwrite: bool) -> Result<(), Error> { - unsafe { - try_call!(raw::git_submodule_init(self.raw, overwrite)); - } - Ok(()) - } - - /// Open the repository for a submodule. - /// - /// This will only work if the submodule is checked out into the working - /// directory. 
- pub fn open(&self) -> Result { - let mut raw = 0 as *mut raw::git_repository; - unsafe { - try_call!(raw::git_submodule_open(&mut raw, self.raw)); - Ok(Binding::from_raw(raw)) - } - } - - /// Reread submodule info from config, index, and HEAD. - /// - /// Call this to reread cached submodule information for this submodule if - /// you have reason to believe that it has changed. - /// - /// If `force` is `true`, then data will be reloaded even if it doesn't seem - /// out of date - pub fn reload(&mut self, force: bool) -> Result<(), Error> { - unsafe { - try_call!(raw::git_submodule_reload(self.raw, force)); - } - Ok(()) - } - - /// Copy submodule remote info into submodule repo. - /// - /// This copies the information about the submodules URL into the checked - /// out submodule config, acting like "git submodule sync". This is useful - /// if you have altered the URL for the submodule (or it has been altered - /// by a fetch of upstream changes) and you need to update your local repo. - pub fn sync(&mut self) -> Result<(), Error> { - unsafe { try_call!(raw::git_submodule_sync(self.raw)); } - Ok(()) - } - - /// Add current submodule HEAD commit to index of superproject. - /// - /// If `write_index` is true, then the index file will be immediately - /// written. Otherwise you must explicitly call `write()` on an `Index` - /// later on. - pub fn add_to_index(&mut self, write_index: bool) -> Result<(), Error> { - unsafe { - try_call!(raw::git_submodule_add_to_index(self.raw, write_index)); - } - Ok(()) - } - - /// Resolve the setup of a new git submodule. - /// - /// This should be called on a submodule once you have called add setup and - /// done the clone of the submodule. This adds the .gitmodules file and the - /// newly cloned submodule to the index to be ready to be committed (but - /// doesn't actually do the commit). 
- pub fn add_finalize(&mut self) -> Result<(), Error> { - unsafe { try_call!(raw::git_submodule_add_finalize(self.raw)); } - Ok(()) - } -} - -impl<'repo> Binding for Submodule<'repo> { - type Raw = *mut raw::git_submodule; - unsafe fn from_raw(raw: *mut raw::git_submodule) -> Submodule<'repo> { - Submodule { raw: raw, _marker: marker::PhantomData } - } - fn raw(&self) -> *mut raw::git_submodule { self.raw } -} - -impl<'repo> Drop for Submodule<'repo> { - fn drop(&mut self) { - unsafe { raw::git_submodule_free(self.raw) } - } -} - -#[cfg(test)] -mod tests { - use std::path::Path; - use std::fs; - use tempdir::TempDir; - use url::Url; - - use Repository; - - #[test] - fn smoke() { - let td = TempDir::new("test").unwrap(); - let repo = Repository::init(td.path()).unwrap(); - let mut s1 = repo.submodule("/path/to/nowhere", - Path::new("foo"), true).unwrap(); - s1.init(false).unwrap(); - s1.sync().unwrap(); - - let s2 = repo.submodule("/path/to/nowhere", - Path::new("bar"), true).unwrap(); - drop((s1, s2)); - - let mut submodules = repo.submodules().unwrap(); - assert_eq!(submodules.len(), 2); - let mut s = submodules.remove(0); - assert_eq!(s.name(), Some("bar")); - assert_eq!(s.url(), Some("/path/to/nowhere")); - assert_eq!(s.branch(), None); - assert!(s.head_id().is_none()); - assert!(s.index_id().is_none()); - assert!(s.workdir_id().is_none()); - - repo.find_submodule("bar").unwrap(); - s.open().unwrap(); - assert!(s.path() == Path::new("bar")); - s.reload(true).unwrap(); - } - - #[test] - fn add_a_submodule() { - let (_td, repo1) = ::test::repo_init(); - let (td, repo2) = ::test::repo_init(); - - let url = Url::from_file_path(&repo1.workdir().unwrap()).unwrap(); - let mut s = repo2.submodule(&url.to_string(), Path::new("bar"), - true).unwrap(); - t!(fs::remove_dir_all(td.path().join("bar"))); - t!(Repository::clone(&url.to_string(), - td.path().join("bar"))); - t!(s.add_to_index(false)); - t!(s.add_finalize()); - } -} diff -Nru 
cargo-0.17.0/vendor/git2-0.6.3/src/tag.rs cargo-0.19.0/vendor/git2-0.6.3/src/tag.rs --- cargo-0.17.0/vendor/git2-0.6.3/src/tag.rs 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/git2-0.6.3/src/tag.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,173 +0,0 @@ -use std::marker; -use std::mem; -use std::str; - -use {raw, signature, Error, Oid, Object, Signature, ObjectType}; -use util::Binding; - -/// A structure to represent a git [tag][1] -/// -/// [1]: http://git-scm.com/book/en/Git-Basics-Tagging -pub struct Tag<'repo> { - raw: *mut raw::git_tag, - _marker: marker::PhantomData>, -} - -impl<'repo> Tag<'repo> { - /// Get the id (SHA1) of a repository tag - pub fn id(&self) -> Oid { - unsafe { Binding::from_raw(raw::git_tag_id(&*self.raw)) } - } - - /// Get the message of a tag - /// - /// Returns None if there is no message or if it is not valid utf8 - pub fn message(&self) -> Option<&str> { - self.message_bytes().and_then(|s| str::from_utf8(s).ok()) - } - - /// Get the message of a tag - /// - /// Returns None if there is no message - pub fn message_bytes(&self) -> Option<&[u8]> { - unsafe { ::opt_bytes(self, raw::git_tag_message(&*self.raw)) } - } - - /// Get the name of a tag - /// - /// Returns None if it is not valid utf8 - pub fn name(&self) -> Option<&str> { - str::from_utf8(self.name_bytes()).ok() - } - - /// Get the name of a tag - pub fn name_bytes(&self) -> &[u8] { - unsafe { ::opt_bytes(self, raw::git_tag_name(&*self.raw)).unwrap() } - } - - /// Recursively peel a tag until a non tag git_object is found - pub fn peel(&self) -> Result, Error> { - let mut ret = 0 as *mut raw::git_object; - unsafe { - try_call!(raw::git_tag_peel(&mut ret, &*self.raw)); - Ok(Binding::from_raw(ret)) - } - } - - /// Get the tagger (author) of a tag - /// - /// If the author is unspecified, then `None` is returned. 
- pub fn tagger(&self) -> Option { - unsafe { - let ptr = raw::git_tag_tagger(&*self.raw); - if ptr.is_null() { - None - } else { - Some(signature::from_raw_const(self, ptr)) - } - } - } - - /// Get the tagged object of a tag - /// - /// This method performs a repository lookup for the given object and - /// returns it - pub fn target(&self) -> Result, Error> { - let mut ret = 0 as *mut raw::git_object; - unsafe { - try_call!(raw::git_tag_target(&mut ret, &*self.raw)); - Ok(Binding::from_raw(ret)) - } - } - - /// Get the OID of the tagged object of a tag - pub fn target_id(&self) -> Oid { - unsafe { Binding::from_raw(raw::git_tag_target_id(&*self.raw)) } - } - - /// Get the OID of the tagged object of a tag - pub fn target_type(&self) -> Option { - unsafe { ObjectType::from_raw(raw::git_tag_target_type(&*self.raw)) } - } - - /// Casts this Tag to be usable as an `Object` - pub fn as_object(&self) -> &Object<'repo> { - unsafe { - &*(self as *const _ as *const Object<'repo>) - } - } - - /// Consumes Tag to be returned as an `Object` - pub fn into_object(self) -> Object<'repo> { - assert_eq!(mem::size_of_val(&self), mem::size_of::()); - unsafe { - mem::transmute(self) - } - } -} - -impl<'repo> Binding for Tag<'repo> { - type Raw = *mut raw::git_tag; - unsafe fn from_raw(raw: *mut raw::git_tag) -> Tag<'repo> { - Tag { raw: raw, _marker: marker::PhantomData } - } - fn raw(&self) -> *mut raw::git_tag { self.raw } -} - -impl<'repo> Drop for Tag<'repo> { - fn drop(&mut self) { - unsafe { raw::git_tag_free(self.raw) } - } -} - -#[cfg(test)] -mod tests { - #[test] - fn smoke() { - let (_td, repo) = ::test::repo_init(); - let head = repo.head().unwrap(); - let id = head.target().unwrap(); - assert!(repo.find_tag(id).is_err()); - - let obj = repo.find_object(id, None).unwrap(); - let sig = repo.signature().unwrap(); - let tag_id = repo.tag("foo", &obj, &sig, "msg", false).unwrap(); - let tag = repo.find_tag(tag_id).unwrap(); - assert_eq!(tag.id(), tag_id); - - let tags = 
repo.tag_names(None).unwrap(); - assert_eq!(tags.len(), 1); - assert_eq!(tags.get(0), Some("foo")); - - assert_eq!(tag.name(), Some("foo")); - assert_eq!(tag.message(), Some("msg")); - assert_eq!(tag.peel().unwrap().id(), obj.id()); - assert_eq!(tag.target_id(), obj.id()); - assert_eq!(tag.target_type(), Some(::ObjectType::Commit)); - - assert_eq!(tag.tagger().unwrap().name(), sig.name()); - tag.target().unwrap(); - tag.into_object(); - - repo.find_object(tag_id, None).unwrap().as_tag().unwrap(); - repo.find_object(tag_id, None).unwrap().into_tag().ok().unwrap(); - - repo.tag_delete("foo").unwrap(); - } - - #[test] - fn lite() { - let (_td, repo) = ::test::repo_init(); - let head = t!(repo.head()); - let id = head.target().unwrap(); - let obj = t!(repo.find_object(id, None)); - let tag_id = t!(repo.tag_lightweight("foo", &obj, false)); - assert!(repo.find_tag(tag_id).is_err()); - assert_eq!(t!(repo.refname_to_id("refs/tags/foo")), id); - - let tags = t!(repo.tag_names(Some("f*"))); - assert_eq!(tags.len(), 1); - let tags = t!(repo.tag_names(Some("b*"))); - assert_eq!(tags.len(), 0); - } -} diff -Nru cargo-0.17.0/vendor/git2-0.6.3/src/test.rs cargo-0.19.0/vendor/git2-0.6.3/src/test.rs --- cargo-0.17.0/vendor/git2-0.6.3/src/test.rs 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/git2-0.6.3/src/test.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,59 +0,0 @@ -use std::path::{Path, PathBuf}; -use std::io; -use tempdir::TempDir; -use url::Url; - -use Repository; - -macro_rules! 
t { - ($e:expr) => (match $e { - Ok(e) => e, - Err(e) => panic!("{} failed with {}", stringify!($e), e), - }) -} - -pub fn repo_init() -> (TempDir, Repository) { - let td = TempDir::new("test").unwrap(); - let repo = Repository::init(td.path()).unwrap(); - { - let mut config = repo.config().unwrap(); - config.set_str("user.name", "name").unwrap(); - config.set_str("user.email", "email").unwrap(); - let mut index = repo.index().unwrap(); - let id = index.write_tree().unwrap(); - - let tree = repo.find_tree(id).unwrap(); - let sig = repo.signature().unwrap(); - repo.commit(Some("HEAD"), &sig, &sig, "initial", - &tree, &[]).unwrap(); - } - (td, repo) -} - -pub fn path2url(path: &Path) -> String { - Url::from_file_path(path).unwrap().to_string() -} - -#[cfg(windows)] -pub fn realpath(original: &Path) -> io::Result { - Ok(original.to_path_buf()) -} -#[cfg(unix)] -pub fn realpath(original: &Path) -> io::Result { - use std::ffi::{CStr, OsString, CString}; - use std::os::unix::prelude::*; - use libc::{self, c_char}; - extern { - fn realpath(name: *const c_char, resolved: *mut c_char) -> *mut c_char; - } - unsafe { - let cstr = try!(CString::new(original.as_os_str().as_bytes())); - let ptr = realpath(cstr.as_ptr(), 0 as *mut _); - if ptr.is_null() { - return Err(io::Error::last_os_error()) - } - let bytes = CStr::from_ptr(ptr).to_bytes().to_vec(); - libc::free(ptr as *mut _); - Ok(PathBuf::from(OsString::from_vec(bytes))) - } -} diff -Nru cargo-0.17.0/vendor/git2-0.6.3/src/time.rs cargo-0.19.0/vendor/git2-0.6.3/src/time.rs --- cargo-0.17.0/vendor/git2-0.6.3/src/time.rs 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/git2-0.6.3/src/time.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,95 +0,0 @@ -use std::cmp::Ordering; - -use libc::c_int; - -use raw; -use util::Binding; - -/// Time in a signature -#[derive(Copy, Clone, Eq, PartialEq)] -pub struct Time { - raw: raw::git_time, -} - -/// Time structure used in a git index entry. 
-#[derive(Copy, Clone, Eq, PartialEq)] -pub struct IndexTime { - raw: raw::git_index_time, -} - -impl Time { - /// Creates a new time structure from its components. - pub fn new(time: i64, offset: i32) -> Time { - unsafe { - Binding::from_raw(raw::git_time { - time: time as raw::git_time_t, - offset: offset as c_int, - }) - } - } - - /// Return the time, in seconds, from epoch - pub fn seconds(&self) -> i64 { self.raw.time as i64 } - - /// Return the timezone offset, in minutes - pub fn offset_minutes(&self) -> i32 { self.raw.offset as i32 } -} - -impl PartialOrd for Time { - fn partial_cmp(&self, other: &Time) -> Option { - Some(self.cmp(other)) - } -} - -impl Ord for Time { - fn cmp(&self, other: &Time) -> Ordering { - (self.raw.time, self.raw.offset).cmp(&(other.raw.time, other.raw.offset)) - } -} - -impl Binding for Time { - type Raw = raw::git_time; - unsafe fn from_raw(raw: raw::git_time) -> Time { - Time { raw: raw } - } - fn raw(&self) -> raw::git_time { self.raw } -} - -impl IndexTime { - /// Creates a new time structure from its components. - pub fn new(seconds: i32, nanoseconds: u32) -> IndexTime { - unsafe { - Binding::from_raw(raw::git_index_time { - seconds: seconds, - nanoseconds: nanoseconds, - }) - } - } - - /// Returns the number of seconds in the second component of this time. - pub fn seconds(&self) -> i32 { self.raw.seconds } - /// Returns the nanosecond component of this time. 
- pub fn nanoseconds(&self) -> u32 { self.raw.nanoseconds } -} - -impl Binding for IndexTime { - type Raw = raw::git_index_time; - unsafe fn from_raw(raw: raw::git_index_time) -> IndexTime { - IndexTime { raw: raw } - } - fn raw(&self) -> raw::git_index_time { self.raw } -} - -impl PartialOrd for IndexTime { - fn partial_cmp(&self, other: &IndexTime) -> Option { - Some(self.cmp(other)) - } -} - -impl Ord for IndexTime { - fn cmp(&self, other: &IndexTime) -> Ordering { - let me = (self.raw.seconds, self.raw.nanoseconds); - let other = (other.raw.seconds, other.raw.nanoseconds); - me.cmp(&other) - } -} diff -Nru cargo-0.17.0/vendor/git2-0.6.3/src/transport.rs cargo-0.19.0/vendor/git2-0.6.3/src/transport.rs --- cargo-0.17.0/vendor/git2-0.6.3/src/transport.rs 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/git2-0.6.3/src/transport.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,325 +0,0 @@ -//! Interfaces for adding custom transports to libgit2 - -use std::ffi::{CStr, CString}; -use std::io::prelude::*; -use std::io; -use std::mem; -use std::slice; -use std::str; -use libc::{c_int, c_void, c_uint, c_char, size_t}; - -use {raw, panic, Error, Remote}; -use util::Binding; - -/// A transport is a structure which knows how to transfer data to and from a -/// remote. -/// -/// This transport is a representation of the raw transport underneath it, which -/// is similar to a trait object in Rust. -#[allow(missing_copy_implementations)] -pub struct Transport { - raw: *mut raw::git_transport, - owned: bool, -} - -/// Interfaced used by smart transports. -/// -/// The full-fledged definiton of transports has to deal with lots of -/// nitty-gritty details of the git protocol, but "smart transports" largely -/// only need to deal with read() and write() of data over a channel. -/// -/// A smart subtransport is contained within an instance of a smart transport -/// and is delegated to in order to actually conduct network activity to push or -/// pull data from a remote. 
-pub trait SmartSubtransport: Send + 'static { - /// Indicates that this subtransport will be performing the specified action - /// on the specified URL. - /// - /// This function is responsible for making any network connections and - /// returns a stream which can be read and written from in order to - /// negotiate the git protocol. - fn action(&self, url: &str, action: Service) - -> Result, Error>; - - /// Terminates a connection with the remote. - /// - /// Each subtransport is guaranteed a call to close() between calls to - /// action(), except for the following tow natural progressions of actions - /// against a constant URL. - /// - /// 1. UploadPackLs -> UploadPack - /// 2. ReceivePackLs -> ReceivePack - fn close(&self) -> Result<(), Error>; -} - -/// Actions that a smart transport can ask a subtransport to perform -#[derive(Copy, Clone)] -#[allow(missing_docs)] -pub enum Service { - UploadPackLs, - UploadPack, - ReceivePackLs, - ReceivePack, -} - -/// An instance of a stream over which a smart transport will communicate with a -/// remote. -/// -/// Currently this only requires the standard `Read` and `Write` traits. This -/// trait also does not need to be implemented manually as long as the `Read` -/// and `Write` traits are implemented. -pub trait SmartSubtransportStream: Read + Write + Send + 'static {} - -impl SmartSubtransportStream for T {} - -type TransportFactory = Fn(&Remote) -> Result + Send + Sync + - 'static; - -/// Boxed data payload used for registering new transports. -/// -/// Currently only contains a field which knows how to create transports. -struct TransportData { - factory: Box, -} - -/// Instance of a `git_smart_subtransport`, must use `#[repr(C)]` to ensure that -/// the C fields come first. -#[repr(C)] -struct RawSmartSubtransport { - raw: raw::git_smart_subtransport, - obj: Box, -} - -/// Instance of a `git_smart_subtransport_stream`, must use `#[repr(C)]` to -/// ensure that the C fields come first. 
-#[repr(C)] -struct RawSmartSubtransportStream { - raw: raw::git_smart_subtransport_stream, - obj: Box, -} - -/// Add a custom transport definition, to be used in addition to the built-in -/// set of transports that come with libgit2. -/// -/// This function is unsafe as it needs to be externally synchronized with calls -/// to creation of other transports. -pub unsafe fn register(prefix: &str, factory: F) -> Result<(), Error> - where F: Fn(&Remote) -> Result + Send + Sync + 'static -{ - let mut data = Box::new(TransportData { - factory: Box::new(factory), - }); - let prefix = try!(CString::new(prefix)); - let datap = (&mut *data) as *mut TransportData as *mut c_void; - try_call!(raw::git_transport_register(prefix, - transport_factory, - datap)); - mem::forget(data); - Ok(()) -} - -impl Transport { - /// Creates a new transport which will use the "smart" transport protocol - /// for transferring data. - /// - /// A smart transport requires a *subtransport* over which data is actually - /// communicated, but this subtransport largely just needs to be able to - /// read() and write(). The subtransport provided will be used to make - /// connections which can then be read/written from. - /// - /// The `rpc` argument is `true` if the protocol is stateless, false - /// otherwise. For example `http://` is stateless but `git://` is not. 
- pub fn smart(remote: &Remote, - rpc: bool, - subtransport: S) -> Result - where S: SmartSubtransport - { - let mut ret = 0 as *mut _; - - let mut raw = Box::new(RawSmartSubtransport { - raw: raw::git_smart_subtransport { - action: subtransport_action, - close: subtransport_close, - free: subtransport_free, - }, - obj: Box::new(subtransport), - }); - let mut defn = raw::git_smart_subtransport_definition { - callback: smart_factory, - rpc: rpc as c_uint, - param: &mut *raw as *mut _ as *mut _, - }; - - // Currently there's no way to pass a paload via the - // git_smart_subtransport_definition structure, but it's only used as a - // configuration for the initial creation of the smart transport (verified - // by reading the current code, hopefully it doesn't change!). - // - // We, however, need some state (gotta pass in our - // `RawSmartSubtransport`). This also means that this block must be - // entirely synchronized with a lock (boo!) - unsafe { - try_call!(raw::git_transport_smart(&mut ret, remote.raw(), - &mut defn as *mut _ as *mut _)); - mem::forget(raw); // ownership transport to `ret` - } - return Ok(Transport { raw: ret, owned: true }); - - extern fn smart_factory(out: *mut *mut raw::git_smart_subtransport, - _owner: *mut raw::git_transport, - ptr: *mut c_void) -> c_int { - unsafe { - *out = ptr as *mut raw::git_smart_subtransport; - 0 - } - } - } -} - -impl Drop for Transport { - fn drop(&mut self) { - if self.owned { - unsafe { - ((*self.raw).free)(self.raw) - } - } - } -} - -// callback used by register() to create new transports -extern fn transport_factory(out: *mut *mut raw::git_transport, - owner: *mut raw::git_remote, - param: *mut c_void) -> c_int { - struct Bomb<'a> { remote: Option> } - impl<'a> Drop for Bomb<'a> { - fn drop(&mut self) { - // TODO: maybe a method instead? 
- mem::forget(self.remote.take()); - } - } - - panic::wrap(|| unsafe { - let remote = Bomb { remote: Some(Binding::from_raw(owner)) }; - let data = &mut *(param as *mut TransportData); - match (data.factory)(remote.remote.as_ref().unwrap()) { - Ok(mut transport) => { - *out = transport.raw; - transport.owned = false; - 0 - } - Err(e) => e.raw_code() as c_int, - } - }).unwrap_or(-1) -} - -// callback used by smart transports to delegate an action to a -// `SmartSubtransport` trait object. -extern fn subtransport_action(stream: *mut *mut raw::git_smart_subtransport_stream, - raw_transport: *mut raw::git_smart_subtransport, - url: *const c_char, - action: raw::git_smart_service_t) -> c_int { - panic::wrap(|| unsafe { - let url = CStr::from_ptr(url).to_bytes(); - let url = match str::from_utf8(url).ok() { - Some(s) => s, - None => return -1, - }; - let action = match action { - raw::GIT_SERVICE_UPLOADPACK_LS => Service::UploadPackLs, - raw::GIT_SERVICE_UPLOADPACK => Service::UploadPack, - raw::GIT_SERVICE_RECEIVEPACK_LS => Service::ReceivePackLs, - raw::GIT_SERVICE_RECEIVEPACK => Service::ReceivePack, - n => panic!("unknown action: {}", n), - }; - let transport = &mut *(raw_transport as *mut RawSmartSubtransport); - let obj = match transport.obj.action(url, action) { - Ok(s) => s, - Err(e) => return e.raw_code() as c_int, - }; - *stream = mem::transmute(Box::new(RawSmartSubtransportStream { - raw: raw::git_smart_subtransport_stream { - subtransport: raw_transport, - read: stream_read, - write: stream_write, - free: stream_free, - }, - obj: obj, - })); - 0 - }).unwrap_or(-1) -} - -// callback used by smart transports to close a `SmartSubtransport` trait -// object. 
-extern fn subtransport_close(transport: *mut raw::git_smart_subtransport) - -> c_int { - let ret = panic::wrap(|| unsafe { - let transport = &mut *(transport as *mut RawSmartSubtransport); - transport.obj.close() - }); - match ret { - Some(Ok(())) => 0, - Some(Err(e)) => e.raw_code() as c_int, - None => -1, - } -} - -// callback used by smart transports to free a `SmartSubtransport` trait -// object. -extern fn subtransport_free(transport: *mut raw::git_smart_subtransport) { - let _ = panic::wrap(|| unsafe { - mem::transmute::<_, Box>(transport); - }); -} - -// callback used by smart transports to read from a `SmartSubtransportStream` -// object. -extern fn stream_read(stream: *mut raw::git_smart_subtransport_stream, - buffer: *mut c_char, - buf_size: size_t, - bytes_read: *mut size_t) -> c_int { - let ret = panic::wrap(|| unsafe { - let transport = &mut *(stream as *mut RawSmartSubtransportStream); - let buf = slice::from_raw_parts_mut(buffer as *mut u8, - buf_size as usize); - match transport.obj.read(buf) { - Ok(n) => { *bytes_read = n as size_t; Ok(n) } - e => e, - } - }); - match ret { - Some(Ok(_)) => 0, - Some(Err(e)) => unsafe { set_err(e); -2 }, - None => -1, - } -} - -// callback used by smart transports to write to a `SmartSubtransportStream` -// object. -extern fn stream_write(stream: *mut raw::git_smart_subtransport_stream, - buffer: *const c_char, - len: size_t) -> c_int { - let ret = panic::wrap(|| unsafe { - let transport = &mut *(stream as *mut RawSmartSubtransportStream); - let buf = slice::from_raw_parts(buffer as *const u8, len as usize); - transport.obj.write_all(buf) - }); - match ret { - Some(Ok(())) => 0, - Some(Err(e)) => unsafe { set_err(e); -2 }, - None => -1, - } -} - -unsafe fn set_err(e: io::Error) { - let s = CString::new(e.to_string()).unwrap(); - raw::giterr_set_str(raw::GITERR_NET as c_int, s.as_ptr()) -} - -// callback used by smart transports to free a `SmartSubtransportStream` -// object. 
-extern fn stream_free(stream: *mut raw::git_smart_subtransport_stream) { - let _ = panic::wrap(|| unsafe { - mem::transmute::<_, Box>(stream); - }); -} diff -Nru cargo-0.17.0/vendor/git2-0.6.3/src/treebuilder.rs cargo-0.19.0/vendor/git2-0.6.3/src/treebuilder.rs --- cargo-0.17.0/vendor/git2-0.6.3/src/treebuilder.rs 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/git2-0.6.3/src/treebuilder.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,193 +0,0 @@ -use std::marker; - -use libc::{c_int, c_void}; - -use {panic, raw, tree, Error, Oid, Repository, TreeEntry}; -use util::{Binding, IntoCString}; - -/// Constructor for in-memory trees -pub struct TreeBuilder<'repo> { - raw: *mut raw::git_treebuilder, - _marker: marker::PhantomData<&'repo Repository>, -} - -impl<'repo> TreeBuilder<'repo> { - /// Clear all the entries in the builder - pub fn clear(&mut self) { - unsafe { raw::git_treebuilder_clear(self.raw) } - } - - /// Get the number of entries - pub fn len(&self) -> usize { - unsafe { raw::git_treebuilder_entrycount(self.raw) as usize } - } - - /// Get en entry from the builder from its filename - pub fn get

(&self, filename: P) -> Result, Error> - where P: IntoCString - { - let filename = try!(filename.into_c_string()); - unsafe { - let ret = raw::git_treebuilder_get(self.raw, filename.as_ptr()); - if ret.is_null() { - Ok(None) - } else { - Ok(Some(tree::entry_from_raw_const(ret))) - } - } - } - - /// Add or update an entry in the builder - /// - /// No attempt is made to ensure that the provided Oid points to - /// an object of a reasonable type (or any object at all). - /// - /// The mode given must be one of 0o040000, 0o100644, 0o100755, 0o120000 or - /// 0o160000 currently. - pub fn insert(&mut self, filename: P, oid: Oid, - filemode: i32) -> Result { - let filename = try!(filename.into_c_string()); - let filemode = filemode as raw::git_filemode_t; - - let mut ret = 0 as *const raw::git_tree_entry; - unsafe { - try_call!(raw::git_treebuilder_insert(&mut ret, self.raw, filename, - oid.raw(), filemode)); - Ok(tree::entry_from_raw_const(ret)) - } - } - - /// Remove an entry from the builder by its filename - pub fn remove(&mut self, filename: P) -> Result<(), Error> { - let filename = try!(filename.into_c_string()); - unsafe { - try_call!(raw::git_treebuilder_remove(self.raw, filename)); - } - Ok(()) - } - - /// Selectively remove entries from the tree - /// - /// Values for which the filter returns `true` will be kept. Note - /// that this behavior is different from the libgit2 C interface. 
- pub fn filter(&mut self, mut filter: F) - where F: FnMut(&TreeEntry) -> bool - { - let mut cb: &mut FilterCb = &mut filter; - let ptr = &mut cb as *mut _; - unsafe { - raw::git_treebuilder_filter(self.raw, filter_cb, ptr as *mut _); - panic::check(); - } - } - - /// Write the contents of the TreeBuilder as a Tree object and - /// return its Oid - pub fn write(&self) -> Result { - let mut raw = raw::git_oid { id: [0; raw::GIT_OID_RAWSZ] }; - unsafe { - try_call!(raw::git_treebuilder_write(&mut raw, self.raw())); - Ok(Binding::from_raw(&raw as *const _)) - } - } -} - -type FilterCb<'a> = FnMut(&TreeEntry) -> bool + 'a; - -extern fn filter_cb(entry: *const raw::git_tree_entry, - payload: *mut c_void) -> c_int { - let ret = panic::wrap(|| unsafe { - // There's no way to return early from git_treebuilder_filter. - if panic::panicked() { - true - } else { - let entry = tree::entry_from_raw_const(entry); - let payload = payload as *mut &mut FilterCb; - (*payload)(&entry) - } - }); - if ret == Some(false) {1} else {0} -} - -impl<'repo> Binding for TreeBuilder<'repo> { - type Raw = *mut raw::git_treebuilder; - - unsafe fn from_raw(raw: *mut raw::git_treebuilder) -> TreeBuilder<'repo> { - TreeBuilder { raw: raw, _marker: marker::PhantomData } - } - fn raw(&self) -> *mut raw::git_treebuilder { self.raw } -} - -impl<'repo> Drop for TreeBuilder<'repo> { - fn drop(&mut self) { - unsafe { raw::git_treebuilder_free(self.raw) } - } -} - -#[cfg(test)] -mod tests { - use ObjectType; - - #[test] - fn smoke() { - let (_td, repo) = ::test::repo_init(); - - let mut builder = repo.treebuilder(None).unwrap(); - assert_eq!(builder.len(), 0); - let blob = repo.blob(b"data").unwrap(); - { - let entry = builder.insert("a", blob, 0o100644).unwrap(); - assert_eq!(entry.kind(), Some(ObjectType::Blob)); - } - builder.insert("b", blob, 0o100644).unwrap(); - assert_eq!(builder.len(), 2); - builder.remove("a").unwrap(); - assert_eq!(builder.len(), 1); - 
assert_eq!(builder.get("b").unwrap().unwrap().id(), blob); - builder.clear(); - assert_eq!(builder.len(), 0); - } - - #[test] - fn write() { - let (_td, repo) = ::test::repo_init(); - - let mut builder = repo.treebuilder(None).unwrap(); - let data = repo.blob(b"data").unwrap(); - builder.insert("name", data, 0o100644).unwrap(); - let tree = builder.write().unwrap(); - let tree = repo.find_tree(tree).unwrap(); - let entry = tree.get(0).unwrap(); - assert_eq!(entry.name(), Some("name")); - let blob = entry.to_object(&repo).unwrap(); - let blob = blob.as_blob().unwrap(); - assert_eq!(blob.content(), b"data"); - - let builder = repo.treebuilder(Some(&tree)).unwrap(); - assert_eq!(builder.len(), 1); - } - - #[test] - fn filter() { - let (_td, repo) = ::test::repo_init(); - - let mut builder = repo.treebuilder(None).unwrap(); - let blob = repo.blob(b"data").unwrap(); - let tree = { - let head = repo.head().unwrap() - .peel(ObjectType::Commit).unwrap(); - let head = head.as_commit().unwrap(); - head.tree_id() - }; - builder.insert("blob", blob, 0o100644).unwrap(); - builder.insert("dir", tree, 0o040000).unwrap(); - builder.insert("dir2", tree, 0o040000).unwrap(); - - builder.filter(|_| true); - assert_eq!(builder.len(), 3); - builder.filter(|e| e.kind().unwrap() != ObjectType::Blob); - assert_eq!(builder.len(), 2); - builder.filter(|_| false); - assert_eq!(builder.len(), 0); - } -} diff -Nru cargo-0.17.0/vendor/git2-0.6.3/src/tree.rs cargo-0.19.0/vendor/git2-0.6.3/src/tree.rs --- cargo-0.17.0/vendor/git2-0.6.3/src/tree.rs 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/git2-0.6.3/src/tree.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,388 +0,0 @@ -use std::mem; -use std::cmp::Ordering; -use std::ffi::CString; -use std::ops::Range; -use std::marker; -use std::path::Path; -use std::str; -use libc; - -use {raw, Oid, Repository, Error, Object, ObjectType}; -use util::{Binding, IntoCString}; - -/// A structure to represent a git [tree][1] -/// -/// [1]: 
http://git-scm.com/book/en/Git-Internals-Git-Objects -pub struct Tree<'repo> { - raw: *mut raw::git_tree, - _marker: marker::PhantomData>, -} - -/// A structure representing an entry inside of a tree. An entry is borrowed -/// from a tree. -pub struct TreeEntry<'tree> { - raw: *mut raw::git_tree_entry, - owned: bool, - _marker: marker::PhantomData<&'tree raw::git_tree_entry>, -} - -/// An iterator over the entries in a tree. -pub struct TreeIter<'tree> { - range: Range, - tree: &'tree Tree<'tree>, -} - -impl<'repo> Tree<'repo> { - /// Get the id (SHA1) of a repository object - pub fn id(&self) -> Oid { - unsafe { Binding::from_raw(raw::git_tree_id(&*self.raw)) } - } - - /// Get the number of entries listed in this tree. - pub fn len(&self) -> usize { - unsafe { raw::git_tree_entrycount(&*self.raw) as usize } - } - - /// Returns an iterator over the entries in this tree. - pub fn iter(&self) -> TreeIter { - TreeIter { range: 0..self.len(), tree: self } - } - - /// Lookup a tree entry by SHA value. - pub fn get_id(&self, id: Oid) -> Option { - unsafe { - let ptr = raw::git_tree_entry_byid(&*self.raw(), &*id.raw()); - if ptr.is_null() { - None - } else { - Some(entry_from_raw_const(ptr)) - } - } - } - - /// Lookup a tree entry by its position in the tree - pub fn get(&self, n: usize) -> Option { - unsafe { - let ptr = raw::git_tree_entry_byindex(&*self.raw(), - n as libc::size_t); - if ptr.is_null() { - None - } else { - Some(entry_from_raw_const(ptr)) - } - } - } - - /// Lookup a tree entry by its filename - pub fn get_name(&self, filename: &str) -> Option { - let filename = CString::new(filename).unwrap(); - unsafe { - let ptr = call!(raw::git_tree_entry_byname(&*self.raw(), filename)); - if ptr.is_null() { - None - } else { - Some(entry_from_raw_const(ptr)) - } - } - } - - /// Retrieve a tree entry contained in a tree or in any of its subtrees, - /// given its relative path. 
- pub fn get_path(&self, path: &Path) -> Result, Error> { - let path = try!(path.into_c_string()); - let mut ret = 0 as *mut raw::git_tree_entry; - unsafe { - try_call!(raw::git_tree_entry_bypath(&mut ret, &*self.raw(), path)); - Ok(Binding::from_raw(ret)) - } - } - - /// Casts this Tree to be usable as an `Object` - pub fn as_object(&self) -> &Object<'repo> { - unsafe { - &*(self as *const _ as *const Object<'repo>) - } - } - - /// Consumes Commit to be returned as an `Object` - pub fn into_object(self) -> Object<'repo> { - assert_eq!(mem::size_of_val(&self), mem::size_of::()); - unsafe { - mem::transmute(self) - } - } -} - -impl<'repo> Binding for Tree<'repo> { - type Raw = *mut raw::git_tree; - - unsafe fn from_raw(raw: *mut raw::git_tree) -> Tree<'repo> { - Tree { raw: raw, _marker: marker::PhantomData } - } - fn raw(&self) -> *mut raw::git_tree { self.raw } -} - -impl<'repo> Drop for Tree<'repo> { - fn drop(&mut self) { - unsafe { raw::git_tree_free(self.raw) } - } -} - -impl<'repo, 'iter> IntoIterator for &'iter Tree<'repo> { - type Item = TreeEntry<'iter>; - type IntoIter = TreeIter<'iter>; - fn into_iter(self) -> Self::IntoIter { - self.iter() - } -} - -/// Create a new tree entry from the raw pointer provided. -/// -/// The lifetime of the entry is tied to the tree provided and the function -/// is unsafe because the validity of the pointer cannot be guaranteed. 
-pub unsafe fn entry_from_raw_const<'tree>(raw: *const raw::git_tree_entry) - -> TreeEntry<'tree> { - TreeEntry { - raw: raw as *mut raw::git_tree_entry, - owned: false, - _marker: marker::PhantomData, - } -} - -impl<'tree> TreeEntry<'tree> { - /// Get the id of the object pointed by the entry - pub fn id(&self) -> Oid { - unsafe { Binding::from_raw(raw::git_tree_entry_id(&*self.raw)) } - } - - /// Get the filename of a tree entry - /// - /// Returns `None` if the name is not valid utf-8 - pub fn name(&self) -> Option<&str> { - str::from_utf8(self.name_bytes()).ok() - } - - /// Get the filename of a tree entry - pub fn name_bytes(&self) -> &[u8] { - unsafe { - ::opt_bytes(self, raw::git_tree_entry_name(&*self.raw())).unwrap() - } - } - - /// Convert a tree entry to the object it points to. - pub fn to_object<'a>(&self, repo: &'a Repository) - -> Result, Error> { - let mut ret = 0 as *mut raw::git_object; - unsafe { - try_call!(raw::git_tree_entry_to_object(&mut ret, repo.raw(), - &*self.raw())); - Ok(Binding::from_raw(ret)) - } - } - - /// Get the type of the object pointed by the entry - pub fn kind(&self) -> Option { - ObjectType::from_raw(unsafe { raw::git_tree_entry_type(&*self.raw) }) - } - - /// Get the UNIX file attributes of a tree entry - pub fn filemode(&self) -> i32 { - unsafe { raw::git_tree_entry_filemode(&*self.raw) as i32 } - } - - /// Get the raw UNIX file attributes of a tree entry - pub fn filemode_raw(&self) -> i32 { - unsafe { raw::git_tree_entry_filemode_raw(&*self.raw) as i32 } - } - - /// Convert this entry of any lifetime into an owned signature with a static - /// lifetime. - /// - /// This will use the `Clone::clone` implementation under the hood. 
- pub fn to_owned(&self) -> TreeEntry<'static> { - unsafe { - let me = mem::transmute::<&TreeEntry<'tree>, &TreeEntry<'static>>(self); - me.clone() - } - } -} - -impl<'a> Binding for TreeEntry<'a> { - type Raw = *mut raw::git_tree_entry; - unsafe fn from_raw(raw: *mut raw::git_tree_entry) -> TreeEntry<'a> { - TreeEntry { - raw: raw, - owned: true, - _marker: marker::PhantomData, - } - } - fn raw(&self) -> *mut raw::git_tree_entry { self.raw } -} - -impl<'a> Clone for TreeEntry<'a> { - fn clone(&self) -> TreeEntry<'a> { - let mut ret = 0 as *mut raw::git_tree_entry; - unsafe { - assert_eq!(raw::git_tree_entry_dup(&mut ret, &*self.raw()), 0); - Binding::from_raw(ret) - } - } -} - -impl<'a> PartialOrd for TreeEntry<'a> { - fn partial_cmp(&self, other: &TreeEntry<'a>) -> Option { - Some(self.cmp(other)) - } -} -impl<'a> Ord for TreeEntry<'a> { - fn cmp(&self, other: &TreeEntry<'a>) -> Ordering { - match unsafe { raw::git_tree_entry_cmp(&*self.raw(), &*other.raw()) } { - 0 => Ordering::Equal, - n if n < 0 => Ordering::Less, - _ => Ordering::Greater, - } - } -} - -impl<'a> PartialEq for TreeEntry<'a> { - fn eq(&self, other: &TreeEntry<'a>) -> bool { - self.cmp(other) == Ordering::Equal - } -} -impl<'a> Eq for TreeEntry<'a> {} - -impl<'a> Drop for TreeEntry<'a> { - fn drop(&mut self) { - if self.owned { - unsafe { raw::git_tree_entry_free(self.raw) } - } - } -} - -impl<'tree> Iterator for TreeIter<'tree> { - type Item = TreeEntry<'tree>; - fn next(&mut self) -> Option> { - self.range.next().and_then(|i| self.tree.get(i)) - } - fn size_hint(&self) -> (usize, Option) { self.range.size_hint() } -} -impl<'tree> DoubleEndedIterator for TreeIter<'tree> { - fn next_back(&mut self) -> Option> { - self.range.next_back().and_then(|i| self.tree.get(i)) - } -} -impl<'tree> ExactSizeIterator for TreeIter<'tree> {} - -#[cfg(test)] -mod tests { - use {Repository,Tree,TreeEntry,ObjectType,Object}; - use tempdir::TempDir; - use std::fs::File; - use std::io::prelude::*; - use 
std::path::Path; - - pub struct TestTreeIter<'a> { - entries: Vec>, - repo: &'a Repository, - } - - impl<'a> Iterator for TestTreeIter<'a> { - type Item = TreeEntry<'a>; - - fn next(&mut self) -> Option > { - if self.entries.is_empty() { - None - } else { - let entry = self.entries.remove(0); - - match entry.kind() { - Some(ObjectType::Tree) => { - let obj: Object<'a> = entry.to_object(self.repo).unwrap(); - - let tree: &Tree<'a> = obj.as_tree().unwrap(); - - for entry in tree.iter() { - self.entries.push(entry.to_owned()); - } - } - _ => {} - } - - Some(entry) - } - } - } - - fn tree_iter<'repo>(tree: &Tree<'repo>, repo: &'repo Repository) - -> TestTreeIter<'repo> { - let mut initial = vec![]; - - for entry in tree.iter() { - initial.push(entry.to_owned()); - } - - TestTreeIter { - entries: initial, - repo: repo, - } - } - - #[test] - fn smoke_tree_iter() { - let (td, repo) = ::test::repo_init(); - - setup_repo(&td, &repo); - - let head = repo.head().unwrap(); - let target = head.target().unwrap(); - let commit = repo.find_commit(target).unwrap(); - - let tree = repo.find_tree(commit.tree_id()).unwrap(); - assert_eq!(tree.id(), commit.tree_id()); - assert_eq!(tree.len(), 1); - - for entry in tree_iter(&tree, &repo) { - println!("iter entry {:?}", entry.name()); - } - } - - fn setup_repo(td: &TempDir, repo: &Repository) { - let mut index = repo.index().unwrap(); - File::create(&td.path().join("foo")).unwrap().write_all(b"foo").unwrap(); - index.add_path(Path::new("foo")).unwrap(); - let id = index.write_tree().unwrap(); - let sig = repo.signature().unwrap(); - let tree = repo.find_tree(id).unwrap(); - let parent = repo.find_commit(repo.head().unwrap().target() - .unwrap()).unwrap(); - repo.commit(Some("HEAD"), &sig, &sig, "another commit", - &tree, &[&parent]).unwrap(); - } - - #[test] - fn smoke() { - let (td, repo) = ::test::repo_init(); - - setup_repo(&td, &repo); - - let head = repo.head().unwrap(); - let target = head.target().unwrap(); - let commit = 
repo.find_commit(target).unwrap(); - - let tree = repo.find_tree(commit.tree_id()).unwrap(); - assert_eq!(tree.id(), commit.tree_id()); - assert_eq!(tree.len(), 1); - { - let e1 = tree.get(0).unwrap(); - assert!(e1 == tree.get_id(e1.id()).unwrap()); - assert!(e1 == tree.get_name("foo").unwrap()); - assert!(e1 == tree.get_path(Path::new("foo")).unwrap()); - assert_eq!(e1.name(), Some("foo")); - e1.to_object(&repo).unwrap(); - } - tree.into_object(); - - repo.find_object(commit.tree_id(), None).unwrap().as_tree().unwrap(); - repo.find_object(commit.tree_id(), None).unwrap().into_tree().ok().unwrap(); - } -} diff -Nru cargo-0.17.0/vendor/git2-0.6.3/src/util.rs cargo-0.19.0/vendor/git2-0.6.3/src/util.rs --- cargo-0.17.0/vendor/git2-0.6.3/src/util.rs 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/git2-0.6.3/src/util.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,152 +0,0 @@ -use std::ffi::{CString, OsStr, OsString}; -use std::iter::IntoIterator; -use std::path::{Path, PathBuf}; -use libc::{c_char, size_t}; - -use {raw, Error}; - -#[doc(hidden)] -pub trait IsNull { - fn is_ptr_null(&self) -> bool; -} -impl IsNull for *const T { - fn is_ptr_null(&self) -> bool { - self.is_null() - } -} -impl IsNull for *mut T { - fn is_ptr_null(&self) -> bool { - self.is_null() - } -} - -#[doc(hidden)] -pub trait Binding: Sized { - type Raw; - - unsafe fn from_raw(raw: Self::Raw) -> Self; - fn raw(&self) -> Self::Raw; - - unsafe fn from_raw_opt(raw: T) -> Option - where T: Copy + IsNull, Self: Binding - { - if raw.is_ptr_null() { - None - } else { - Some(Binding::from_raw(raw)) - } - } -} - -pub fn iter2cstrs(iter: I) -> Result<(Vec, Vec<*const c_char>, - raw::git_strarray), Error> - where T: IntoCString, I: IntoIterator -{ - let cstrs: Vec<_> = try!(iter.into_iter().map(|i| i.into_c_string()).collect()); - let ptrs = cstrs.iter().map(|i| i.as_ptr()).collect::>(); - let raw = raw::git_strarray { - strings: ptrs.as_ptr() as *mut _, - count: ptrs.len() as size_t, - }; - 
Ok((cstrs, ptrs, raw)) -} - -#[cfg(unix)] -pub fn bytes2path(b: &[u8]) -> &Path { - use std::os::unix::prelude::*; - Path::new(OsStr::from_bytes(b)) -} -#[cfg(windows)] -pub fn bytes2path(b: &[u8]) -> &Path { - use std::str; - Path::new(str::from_utf8(b).unwrap()) -} - -/// A class of types that can be converted to C strings. -/// -/// These types are represented internally as byte slices and it is quite rare -/// for them to contain an interior 0 byte. -pub trait IntoCString { - /// Consume this container, converting it into a CString - fn into_c_string(self) -> Result; -} - -impl<'a, T: IntoCString + Clone> IntoCString for &'a T { - fn into_c_string(self) -> Result { - self.clone().into_c_string() - } -} - -impl<'a> IntoCString for &'a str { - fn into_c_string(self) -> Result { - Ok(try!(CString::new(self))) - } -} - -impl IntoCString for String { - fn into_c_string(self) -> Result { - Ok(try!(CString::new(self.into_bytes()))) - } -} - -impl IntoCString for CString { - fn into_c_string(self) -> Result { Ok(self) } -} - -impl<'a> IntoCString for &'a Path { - fn into_c_string(self) -> Result { - let s: &OsStr = self.as_ref(); - s.into_c_string() - } -} - -impl IntoCString for PathBuf { - fn into_c_string(self) -> Result { - let s: OsString = self.into(); - s.into_c_string() - } -} - -impl<'a> IntoCString for &'a OsStr { - fn into_c_string(self) -> Result { - self.to_os_string().into_c_string() - } -} - -impl IntoCString for OsString { - #[cfg(unix)] - fn into_c_string(self) -> Result { - use std::os::unix::prelude::*; - let s: &OsStr = self.as_ref(); - Ok(try!(CString::new(s.as_bytes()))) - } - #[cfg(windows)] - fn into_c_string(self) -> Result { - match self.to_str() { - Some(s) => s.into_c_string(), - None => Err(Error::from_str("only valid unicode paths are accepted \ - on windows")), - } - } -} - -impl<'a> IntoCString for &'a [u8] { - fn into_c_string(self) -> Result { - Ok(try!(CString::new(self))) - } -} - -impl IntoCString for Vec { - fn into_c_string(self) 
-> Result { - Ok(try!(CString::new(self))) - } -} - -pub fn into_opt_c_string(opt_s: Option) -> Result, Error> - where S: IntoCString -{ - match opt_s { - None => Ok(None), - Some(s) => Ok(Some(try!(s.into_c_string()))), - } -} diff -Nru cargo-0.17.0/vendor/git2-0.6.3/.travis.yml cargo-0.19.0/vendor/git2-0.6.3/.travis.yml --- cargo-0.17.0/vendor/git2-0.6.3/.travis.yml 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/git2-0.6.3/.travis.yml 1970-01-01 00:00:00.000000000 +0000 @@ -1,44 +0,0 @@ -language: rust -rust: - - stable - - beta - - nightly -sudo: false -before_script: - - pip install 'travis-cargo<0.2' --user && export PATH=$HOME/.local/bin:$PATH -script: - - cargo test --no-default-features - - cargo test - - cargo run --manifest-path systest/Cargo.toml --release - - if [ "$TRAVIS_RUST_VERSION" = "nightly" ]; then - cargo test --features unstable; - cargo test --manifest-path git2-curl/Cargo.toml; - fi - - cargo doc --no-deps - - cargo doc --manifest-path=git2-curl/Cargo.toml --no-deps - - cargo doc --manifest-path=libgit2-sys/Cargo.toml --no-deps -after_success: - - travis-cargo --only nightly doc-upload - - travis-cargo coveralls --no-sudo -notifications: - email: - on_success: never -matrix: - include: - - os: osx - rust: stable - before_install: - - export OPENSSL_INCLUDE_DIR=`brew --prefix openssl`/include - - export OPENSSL_LIB_DIR=`brew --prefix openssl`/lib -addons: - apt: - sources: - - kalakris-cmake - packages: - - cmake - - libcurl4-openssl-dev - - libelf-dev - - libdw-dev -env: - global: - secure: "SVk5cv4VnBQAoaBXt9pIHk+FQ7Z58zT5EaPo7Ac81LltKztwHovhN/R1otKzgrAJqFsZ/nKR4cGyQGbYtfVJcsqweQVM75LI6Oh6lYyEdfX211ZI3SWQ50JO93CmwLtanC5UpECdXvJLCgXrHGJXuL1oi7hySGy47/yQlKH6eaM=" diff -Nru cargo-0.17.0/vendor/git2-0.6.4/appveyor.yml cargo-0.19.0/vendor/git2-0.6.4/appveyor.yml --- cargo-0.17.0/vendor/git2-0.6.4/appveyor.yml 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/git2-0.6.4/appveyor.yml 2017-08-16 09:07:18.000000000 +0000 @@ 
-0,0 +1,23 @@ +environment: + matrix: + - TARGET: x86_64-pc-windows-gnu + MSYS_BITS: 64 + - TARGET: i686-pc-windows-gnu + MSYS_BITS: 32 + - TARGET: x86_64-pc-windows-msvc + - TARGET: i686-pc-windows-msvc +install: + - ps: Start-FileDownload "https://static.rust-lang.org/dist/rust-nightly-${env:TARGET}.exe" + - rust-nightly-%TARGET%.exe /VERYSILENT /NORESTART /DIR="C:\Program Files (x86)\Rust" + - set PATH=%PATH%;C:\Program Files (x86)\Rust\bin + - if defined MSYS_BITS set PATH=C:\msys64\mingw%MSYS_BITS%\bin;C:\msys64\usr\bin;%PATH% + - set CARGO_TARGET_DIR=%APPVEYOR_BUILD_FOLDER%\target + - rustc -V + - cargo -V + +build: false + +test_script: + - cargo test --target %TARGET% + - cargo test --no-default-features --target %TARGET% + - cargo run --manifest-path systest/Cargo.toml --target %TARGET% diff -Nru cargo-0.17.0/vendor/git2-0.6.4/.cargo-checksum.json cargo-0.19.0/vendor/git2-0.6.4/.cargo-checksum.json --- cargo-0.17.0/vendor/git2-0.6.4/.cargo-checksum.json 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/git2-0.6.4/.cargo-checksum.json 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1 @@ 
+{"files":{".cargo-ok":"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",".gitignore":"f990d3cc59423f4ac490adef9eb87a783f255505bf27ea8e88cd00f89fc46a56",".gitmodules":"768f0798b18f77ffaf1ed319c765a12894c838193490c76478b1cda14cfd0893",".travis.yml":"98644ce2fff201b5f0337eaccc6a0ddb536e5d8acf8ed962e5a008a647756308","Cargo.toml":"5594ae701c6bacae4ca8a624ac9d25c6de3909862e14add194b3cfa6d60c174b","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"378f5840b258e2779c39418f3f2d7b2ba96f1c7917dd6be0713f88305dbda397","README.md":"ec491af87840bbc50d3681193db020703e4eb9d44f8294c93ec465ca6474c106","appveyor.yml":"b381895d3a8863f2c0926efdb66f40e9a84cd8811337d78a7e5cd64e1a61c201","examples/add.rs":"7c602f1122bbf107d7ffd56ddf29ff2d14e77c4adf7f739cd2dd9e73ada10513","examples/blame.rs":"841558268e9cb3847bea87640c328356f908d1c8faa3fa10d78d659e59f32db3","examples/cat-file.rs":"6685899cef3e2414e121005e24125a0e8f250e4c073d2228fa05394e9a9e2f7a","examples/clone.rs":"e9da03f5deac59edaf6ae894b7cb9a7e5b33acb3e43b7d730d79483eb921930b","examples/diff.rs":"35a2268af23e8a299865ff7895d98227ad86c65132730eb1e3eb110888c3d85b","examples/fetch.rs":"03dd1a64a79f58675092d6c9bd9620e3cf0e43e5dce5688800bb4fc262201718","examples/init.rs":"f409d9f1145e77a39eeacf3008ee8b4ac9fe7542a291cd40b423d26fe43a34b7","examples/log.rs":"9e3c01117bc7359dc628ccb94148c959e78c44a7bbf0e8f32cfa67cc38b3cda0","examples/ls-remote.rs":"3cc68143edcc57490ecb310eb7101713a7327b4a18a467d620ef38cf31c9e71c","examples/rev-list.rs":"ce6060a45a1c81466e3ae9f34c45f14402bdb1022ef3ad502fd2036abbc7dfe5","examples/rev-parse.rs":"6ec6091f2fd9d8f4f474466d0b4bbaf356dcc804dc9889a0d31826c7c16b08d7","examples/status.rs":"953ec422081bd04a8254435f034fe8f87e1974090f08fd1a000e4e0107821544","examples/tag.rs":"07d72b4fcbb0997c0f1d96020b914d87dae4e066302b0efd244a7752cc76260e","src/blame.rs":"474cf57e34a90d2f5e190131e18d8793ea6b673b6e833a64240f8a0f80aeebea","src/blob.rs":"762ce3196d067f01be4bf5d3902
7d16d885c0e2735670fc062750779d12445cc","src/branch.rs":"9c2a9e93e7c6d8a98e2018cdec82396ae4dbfbb1918029e04c1ac889cf884847","src/buf.rs":"ff2ec90d03cbe76a61a1a8c913126b151c5fbaba4dc972ad378ecd630e644b6d","src/build.rs":"e194d766e84e04fae511b978ee44617bae6bba2d376d47072f04c25d467cc4f5","src/call.rs":"cae278421b3b0c3dae2085f01294f6834eac125ecc0f2e44397a9c073a00284b","src/cert.rs":"b90593113ec89c71967ae6b3aea750690c6fa936ea0d8ebb7cb506009c360af7","src/commit.rs":"3a8394d503f44dc65b52ae3911d1ea9c5b2c2c279fd4af158222658f531ec486","src/config.rs":"ccf1a08187b3ea1fe67e05e6d627ba5b27e4a2bd90056ca5bf254b514639550c","src/cred.rs":"1e4a5ccca602789c17d9bfd3dbde26aca6a561519daf36a1279785dd2a5cceda","src/describe.rs":"0210d92c3cf4626e6df0a290549cbf970acd714032c976fa320166e64f80f6c2","src/diff.rs":"f4db87ed443256e98cb8d9646747831acffe2e15141795df0da33a1bf23d6a95","src/error.rs":"560ae0f41be03070fac4de0dce8a3d9a1d16047fdd6c054d6fe64a634406e2ca","src/index.rs":"5ae43578c88d35f0532a70de5ad23ca4d13a423bcebdfe06b879a9bf4874694d","src/lib.rs":"71d9b17a0d343561db8bbe18c2d26ed869f99e36c7acd8ce4867a3820fd48aa2","src/merge.rs":"ed6986a77d1ae8793d9c6a8df052f964f514a0205abdf8dc4d0af24bf5908532","src/message.rs":"b10d9ff203702beb97a48cdfb9a0d19f650ef4427c006bd175d72f7f24f0d9ba","src/note.rs":"6cb91b5ae8c58c4d3a46fccc55ceaf8f10a522e2f2c88c19575e7314029359b2","src/object.rs":"3cf6dd0fd75bb0070987812419a847bb19ae68c13fcd5483056450cc754c770d","src/oid.rs":"21a94d1d3c5f3bb5abb10f74cea206186660b05eafd4957a21f7cbbf89cee933","src/oid_array.rs":"a1d43026adfdf01e12fcee03462db41372a2eec08dc710855775e3a2b2b33117","src/packbuilder.rs":"2f4ff842412bdf7a545ba4361cb8e88181a677da214f207f0bb39eb937916b56","src/panic.rs":"d8a6e8899f95ad1fa041278776fe3a693689d2e7682058398868e8403baaa960","src/patch.rs":"5ede15ddedea25a408ec4f74c291ab971ce5a826927794c569173f68a57c0222","src/pathspec.rs":"adbef0a6d9f9415790d1b578ff0f71394acab18b594f7bef3910b855591fe059","src/proxy_options.rs":"6ff8717ce32d8536afee0da47cc19a4ee0df3b88
c6b4736547279b699deac0ca","src/reference.rs":"d0f1e43c6164f1d8e5fd6dbdc25df5212a9ed7c0b674550ab80c3f248da578bc","src/reflog.rs":"fe88541c6297f10d8aae4fbcddcc63a9f329b6ff277d342dc14b667c01c5c69e","src/refspec.rs":"dce85c33987bb9aa23af09b782bda0e82b6cc585727bfe059780460c9993ac94","src/remote.rs":"32a3447b81d00b8c78ccfd015b257039ecb898b14124c170cf266ae2bdd6624e","src/remote_callbacks.rs":"607dc1a25f7288095761bb9341d03dcac26b229ea5a1a5f0b921c973004a4512","src/repo.rs":"e501528a5094883a8572f3bf0a93cb78f8d21f25b59aff6ba4d92b9d8ea5a1ae","src/revspec.rs":"b68ee5df102191defc73eabdd948bc2681f9b900047a309fc208d640e5c493ad","src/revwalk.rs":"b04701b8c91b172aef1bc1c1592252998ac94d3be893364d0513aad453c05d68","src/signature.rs":"7671512cad49e164fb2763320706e72d31a7b25b7e200037100048bb555cb13b","src/stash.rs":"c03e598d970b42768159c468f23ec4ad9b5584d143bae60d75b2970da7e7cf32","src/status.rs":"ff74b77800e2b6d7d7a67f796753367500d24df27d6c0652813b554e42a0f6a2","src/string_array.rs":"ea164c415ee9cf7f654c0bbb29f58b22e9cc10d3a58a59156a9b50a8a027c27b","src/submodule.rs":"b3afe5e3fce3aa87ab328375a5730e7e86c88e1f40f0a274e5c1d8b530348a40","src/tag.rs":"17c0804e167ef56643bb729a12722ead5b72e55d83578e0130b0647673126d9c","src/test.rs":"5d251bdaea22b6d0ebdb992a169ec8671e292d75ac5f3876a104b65d3abb508c","src/time.rs":"3ee4c625da74e1ae0e1aa9488ebf666009736fcc01a1b61287f851906827d947","src/transport.rs":"13a1e0c3d5d774e1ef6200f93e5face4d3c9c0397813fc5e8aa30aa4bf6f5bd2","src/tree.rs":"8c0560204edf4f195f4a71cf6f1390169c8ebeaf742d589eabfe58e40c1651fe","src/treebuilder.rs":"fb60deaf889d9a7c98db5056f21d27820ebdeff6826bc227a79b44488dcda7b3","src/util.rs":"16c04cd1fda5d5280f55e3ed71f687c3367b7e90e508c5a5e277204eb11a7357"},"package":"046ae03385257040b2a35e56d9669d950dd911ba2bf48202fbef73ee6aab27b2"} \ No newline at end of file diff -Nru cargo-0.17.0/vendor/git2-0.6.4/Cargo.toml cargo-0.19.0/vendor/git2-0.6.4/Cargo.toml --- cargo-0.17.0/vendor/git2-0.6.4/Cargo.toml 1970-01-01 00:00:00.000000000 +0000 +++ 
cargo-0.19.0/vendor/git2-0.6.4/Cargo.toml 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,47 @@ +[package] + +name = "git2" +version = "0.6.4" +authors = ["Alex Crichton "] +license = "MIT/Apache-2.0" +readme = "README.md" +keywords = ["git"] +repository = "https://github.com/alexcrichton/git2-rs" +homepage = "https://github.com/alexcrichton/git2-rs" +documentation = "https://docs.rs/git2-rs" +description = """ +Bindings to libgit2 for interoperating with git repositories. This library is +both threadsafe and memory safe and allows both reading and writing git +repositories. +""" +categories = ["api-bindings"] + +[badges] +travis-ci = { repository = "alexcrichton/git2-rs" } +appveyor = { repository = "alexcrichton/git2-rs" } + +[dependencies] +url = "1.0" +bitflags = "0.7" +libc = "0.2" +libgit2-sys = { path = "libgit2-sys", version = "0.6.6" } + +[target."cfg(all(unix, not(target_os = \"macos\")))".dependencies] +openssl-sys = { version = "0.9.0", optional = true } +openssl-probe = { version = "0.1", optional = true } + +[dev-dependencies] +docopt = "0.6" +rustc-serialize = "0.3" +time = "0.1" +tempdir = "0.3" + +[features] +unstable = [] +default = ["ssh", "https", "curl"] +ssh = ["libgit2-sys/ssh"] +https = ["libgit2-sys/https", "openssl-sys", "openssl-probe"] +curl = ["libgit2-sys/curl"] + +[workspace] +members = ["systest", "git2-curl"] diff -Nru cargo-0.17.0/vendor/git2-0.6.4/examples/add.rs cargo-0.19.0/vendor/git2-0.6.4/examples/add.rs --- cargo-0.17.0/vendor/git2-0.6.4/examples/add.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/git2-0.6.4/examples/add.rs 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,84 @@ +/* + * libgit2 "add" example - shows how to modify the index + * + * Written by the libgit2 contributors + * + * To the extent possible under law, the author(s) have dedicated all copyright + * and related and neighboring rights to this software to the public domain + * worldwide. This software is distributed without any warranty. 
+ * + * You should have received a copy of the CC0 Public Domain Dedication along + * with this software. If not, see + * . + */ + +#![deny(warnings)] +#![allow(trivial_casts)] + +extern crate git2; +extern crate docopt; +extern crate rustc_serialize; + +use std::path::Path; +use docopt::Docopt; +use git2::Repository; + +#[derive(RustcDecodable)] +struct Args { + arg_spec: Vec, + flag_dry_run: bool, + flag_verbose: bool, + flag_update: bool, +} + +fn run(args: &Args) -> Result<(), git2::Error> { + let repo = try!(Repository::open(&Path::new("."))); + let mut index = try!(repo.index()); + + let cb = &mut |path: &Path, _matched_spec: &[u8]| -> i32 { + let status = repo.status_file(path).unwrap(); + + let ret = if status.contains(git2::STATUS_WT_MODIFIED) || + status.contains(git2::STATUS_WT_NEW) { + println!("add '{}'", path.display()); + 0 + } else { + 1 + }; + + if args.flag_dry_run {1} else {ret} + }; + let cb = if args.flag_verbose || args.flag_update { + Some(cb as &mut git2::IndexMatchedPath) + } else { + None + }; + + if args.flag_update { + try!(index.update_all(args.arg_spec.iter(), cb)); + } else { + try!(index.add_all(args.arg_spec.iter(), git2::ADD_DEFAULT, cb)); + } + + try!(index.write()); + Ok(()) +} + +fn main() { + const USAGE: &'static str = " +usage: add [options] [--] [..] 
+ +Options: + -n, --dry-run dry run + -v, --verbose be verbose + -u, --update update tracked files + -h, --help show this message +"; + + let args = Docopt::new(USAGE).and_then(|d| d.decode()) + .unwrap_or_else(|e| e.exit()); + match run(&args) { + Ok(()) => {} + Err(e) => println!("error: {}", e), + } +} diff -Nru cargo-0.17.0/vendor/git2-0.6.4/examples/blame.rs cargo-0.19.0/vendor/git2-0.6.4/examples/blame.rs --- cargo-0.17.0/vendor/git2-0.6.4/examples/blame.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/git2-0.6.4/examples/blame.rs 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,105 @@ +/* + * libgit2 "blame" example - shows how to use the blame API + * + * Written by the libgit2 contributors + * + * To the extent possible under law, the author(s) have dedicated all copyright + * and related and neighboring rights to this software to the public domain + * worldwide. This software is distributed without any warranty. + * + * You should have received a copy of the CC0 Public Domain Dedication along + * with this software. If not, see + * . 
+ */ + +extern crate git2; +extern crate docopt; +extern crate rustc_serialize; + +use docopt::Docopt; +use git2::{Repository, BlameOptions}; +use std::path::Path; +use std::io::{BufReader, BufRead}; + +#[derive(RustcDecodable)] #[allow(non_snake_case)] +struct Args { + arg_path: String, + arg_spec: Option, + flag_M: bool, + flag_C: bool, + flag_F: bool, +} + +fn run(args: &Args) -> Result<(), git2::Error> { + let repo = try!(Repository::open(".")); + let path = Path::new(&args.arg_path[..]); + + // Prepare our blame options + let mut opts = BlameOptions::new(); + opts.track_copies_same_commit_moves(args.flag_M) + .track_copies_same_commit_copies(args.flag_C) + .first_parent(args.flag_F); + + let mut commit_id = "HEAD".to_string(); + + // Parse spec + if let Some(spec) = args.arg_spec.as_ref() { + + let revspec = try!(repo.revparse(spec)); + + let (oldest, newest) = if revspec.mode().contains(git2::REVPARSE_SINGLE) { + (None, revspec.from()) + } else if revspec.mode().contains(git2::REVPARSE_RANGE) { + (revspec.from(), revspec.to()) + } else { + (None, None) + }; + + if let Some(commit) = oldest { + opts.oldest_commit(commit.id()); + } + + if let Some(commit) = newest { + opts.newest_commit(commit.id()); + if !commit.id().is_zero() { + commit_id = format!("{}", commit.id()) + } + } + + } + + let spec = format!("{}:{}", commit_id, path.display()); + let blame = try!(repo.blame_file(path, Some(&mut opts))); + let object = try!(repo.revparse_single(&spec[..])); + let blob = try!(repo.find_blob(object.id())); + let reader = BufReader::new(blob.content()); + + for (i, line) in reader.lines().enumerate() { + if let (Ok(line), Some(hunk)) = (line, blame.get_line(i+1)) { + let sig = hunk.final_signature(); + println!("{} {} <{}> {}", hunk.final_commit_id(), + String::from_utf8_lossy(sig.name_bytes()), + String::from_utf8_lossy(sig.email_bytes()), line); + } + } + + Ok(()) +} + +fn main() { + const USAGE: &'static str = " +usage: blame [options] [] + +Options: + -M find 
line moves within and across files + -C find line copies within and across files + -F follow only the first parent commits +"; + + let args = Docopt::new(USAGE).and_then(|d| d.decode()) + .unwrap_or_else(|e| e.exit()); + match run(&args) { + Ok(()) => {} + Err(e) => println!("error: {}", e), + } +} diff -Nru cargo-0.17.0/vendor/git2-0.6.4/examples/cat-file.rs cargo-0.19.0/vendor/git2-0.6.4/examples/cat-file.rs --- cargo-0.17.0/vendor/git2-0.6.4/examples/cat-file.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/git2-0.6.4/examples/cat-file.rs 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,143 @@ +/* + * libgit2 "cat-file" example - shows how to print data from the ODB + * + * Written by the libgit2 contributors + * + * To the extent possible under law, the author(s) have dedicated all copyright + * and related and neighboring rights to this software to the public domain + * worldwide. This software is distributed without any warranty. + * + * You should have received a copy of the CC0 Public Domain Dedication along + * with this software. If not, see + * . + */ + +#![deny(warnings)] + +extern crate git2; +extern crate docopt; +extern crate rustc_serialize; + +use std::io::{self, Write}; + +use docopt::Docopt; +use git2::{Repository, ObjectType, Blob, Commit, Signature, Tag, Tree}; + +#[derive(RustcDecodable)] +struct Args { + arg_object: String, + flag_t: bool, + flag_s: bool, + flag_e: bool, + flag_p: bool, + flag_q: bool, + flag_v: bool, + flag_git_dir: Option, +} + +fn run(args: &Args) -> Result<(), git2::Error> { + let path = args.flag_git_dir.as_ref().map(|s| &s[..]).unwrap_or("."); + let repo = try!(Repository::open(path)); + + let obj = try!(repo.revparse_single(&args.arg_object)); + if args.flag_v && !args.flag_q { + println!("{} {}\n--", obj.kind().unwrap().str(), obj.id()); + } + + if args.flag_t { + println!("{}", obj.kind().unwrap().str()); + } else if args.flag_s { + /* ... */ + } else if args.flag_e { + /* ... 
*/ + } else if args.flag_p { + match obj.kind() { + Some(ObjectType::Blob) => { + show_blob(obj.as_blob().unwrap()); + } + Some(ObjectType::Commit) => { + show_commit(obj.as_commit().unwrap()); + } + Some(ObjectType::Tag) => { + show_tag(obj.as_tag().unwrap()); + } + Some(ObjectType::Tree) => { + show_tree(obj.as_tree().unwrap()); + } + Some(ObjectType::Any) | None => { + println!("unknown {}", obj.id()) + } + } + } + Ok(()) +} + +fn show_blob(blob: &Blob) { + io::stdout().write_all(blob.content()).unwrap(); +} + +fn show_commit(commit: &Commit) { + println!("tree {}", commit.tree_id()); + for parent in commit.parent_ids() { + println!("parent {}", parent); + } + show_sig("author", Some(commit.author())); + show_sig("committer", Some(commit.committer())); + if let Some(msg) = commit.message() { + println!("\n{}", msg); + } +} + +fn show_tag(tag: &Tag) { + println!("object {}", tag.target_id()); + println!("type {}", tag.target_type().unwrap().str()); + println!("tag {}", tag.name().unwrap()); + show_sig("tagger", tag.tagger()); + + if let Some(msg) = tag.message() { + println!("\n{}", msg); + } +} + +fn show_tree(tree: &Tree) { + for entry in tree.iter() { + println!("{:06o} {} {}\t{}", + entry.filemode(), + entry.kind().unwrap().str(), + entry.id(), + entry.name().unwrap()); + } +} + +fn show_sig(header: &str, sig: Option) { + let sig = match sig { Some(s) => s, None => return }; + let offset = sig.when().offset_minutes(); + let (sign, offset) = if offset < 0 {('-', -offset)} else {('+', offset)}; + let (hours, minutes) = (offset / 60, offset % 60); + println!("{} {} {} {}{:02}{:02}", + header, sig, sig.when().seconds(), sign, hours, minutes); + +} + +fn main() { + const USAGE: &'static str = " +usage: cat-file (-t | -s | -e | -p) [options] + +Options: + -t show the object type + -s show the object size + -e suppress all output + -p pretty print the contents of the object + -q suppress output + -v use verbose output + --git-dir use the specified directory as the 
base directory + -h, --help show this message +"; + + let args = Docopt::new(USAGE).and_then(|d| d.decode()) + .unwrap_or_else(|e| e.exit()); + match run(&args) { + Ok(()) => {} + Err(e) => println!("error: {}", e), + } +} diff -Nru cargo-0.17.0/vendor/git2-0.6.4/examples/clone.rs cargo-0.19.0/vendor/git2-0.6.4/examples/clone.rs --- cargo-0.17.0/vendor/git2-0.6.4/examples/clone.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/git2-0.6.4/examples/clone.rs 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,121 @@ +/* + * libgit2 "clone" example + * + * Written by the libgit2 contributors + * + * To the extent possible under law, the author(s) have dedicated all copyright + * and related and neighboring rights to this software to the public domain + * worldwide. This software is distributed without any warranty. + * + * You should have received a copy of the CC0 Public Domain Dedication along + * with this software. If not, see + * . + */ + +#![deny(warnings)] + +extern crate git2; +extern crate docopt; +extern crate rustc_serialize; + +use docopt::Docopt; +use git2::build::{RepoBuilder, CheckoutBuilder}; +use git2::{RemoteCallbacks, Progress, FetchOptions}; +use std::cell::RefCell; +use std::io::{self, Write}; +use std::path::{Path, PathBuf}; + +#[derive(RustcDecodable)] +struct Args { + arg_url: String, + arg_path: String, +} + +struct State { + progress: Option>, + total: usize, + current: usize, + path: Option, + newline: bool, +} + +fn print(state: &mut State) { + let stats = state.progress.as_ref().unwrap(); + let network_pct = (100 * stats.received_objects()) / stats.total_objects(); + let index_pct = (100 * stats.indexed_objects()) / stats.total_objects(); + let co_pct = if state.total > 0 { + (100 * state.current) / state.total + } else { + 0 + }; + let kbytes = stats.received_bytes() / 1024; + if stats.received_objects() == stats.total_objects() && false { + if !state.newline { + println!(""); + state.newline = true; + } + print!("Resolving deltas 
{}/{}\r", stats.indexed_deltas(), + stats.total_deltas()); + } else { + print!("net {:3}% ({:4} kb, {:5}/{:5}) / idx {:3}% ({:5}/{:5}) \ + / chk {:3}% ({:4}/{:4}) {}\r", + network_pct, kbytes, stats.received_objects(), + stats.total_objects(), + index_pct, stats.indexed_objects(), stats.total_objects(), + co_pct, state.current, state.total, + state.path.as_ref().map(|s| s.to_string_lossy().into_owned()) + .unwrap_or(String::new())); + } + io::stdout().flush().unwrap(); +} + +fn run(args: &Args) -> Result<(), git2::Error> { + let state = RefCell::new(State { + progress: None, + total: 0, + current: 0, + path: None, + newline: false, + }); + let mut cb = RemoteCallbacks::new(); + cb.transfer_progress(|stats| { + let mut state = state.borrow_mut(); + state.progress = Some(stats.to_owned()); + print(&mut *state); + true + }); + + let mut co = CheckoutBuilder::new(); + co.progress(|path, cur, total| { + let mut state = state.borrow_mut(); + state.path = path.map(|p| p.to_path_buf()); + state.current = cur; + state.total = total; + print(&mut *state); + }); + + let mut fo = FetchOptions::new(); + fo.remote_callbacks(cb); + try!(RepoBuilder::new().fetch_options(fo).with_checkout(co) + .clone(&args.arg_url, Path::new(&args.arg_path))); + println!(""); + + Ok(()) +} + +fn main() { + const USAGE: &'static str = " +usage: add [options] + +Options: + -h, --help show this message +"; + + let args = Docopt::new(USAGE).and_then(|d| d.decode()) + .unwrap_or_else(|e| e.exit()); + match run(&args) { + Ok(()) => {} + Err(e) => println!("error: {}", e), + } +} + diff -Nru cargo-0.17.0/vendor/git2-0.6.4/examples/diff.rs cargo-0.19.0/vendor/git2-0.6.4/examples/diff.rs --- cargo-0.17.0/vendor/git2-0.6.4/examples/diff.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/git2-0.6.4/examples/diff.rs 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,283 @@ +/* + * libgit2 "diff" example - shows how to use the diff API + * + * Written by the libgit2 contributors + * + * To the extent 
possible under law, the author(s) have dedicated all copyright + * and related and neighboring rights to this software to the public domain + * worldwide. This software is distributed without any warranty. + * + * You should have received a copy of the CC0 Public Domain Dedication along + * with this software. If not, see + * . + */ + +#![deny(warnings)] + +extern crate git2; +extern crate docopt; +extern crate rustc_serialize; + +use std::str; + +use docopt::Docopt; +use git2::{Repository, Error, Object, ObjectType, DiffOptions, Diff}; +use git2::{DiffFindOptions, DiffFormat}; + +#[derive(RustcDecodable)] #[allow(non_snake_case)] +struct Args { + arg_from_oid: Option, + arg_to_oid: Option, + flag_patch: bool, + flag_cached: bool, + flag_nocached: bool, + flag_name_only: bool, + flag_name_status: bool, + flag_raw: bool, + flag_format: Option, + flag_color: bool, + flag_no_color: bool, + flag_R: bool, + flag_text: bool, + flag_ignore_space_at_eol: bool, + flag_ignore_space_change: bool, + flag_ignore_all_space: bool, + flag_ignored: bool, + flag_untracked: bool, + flag_patience: bool, + flag_minimal: bool, + flag_stat: bool, + flag_numstat: bool, + flag_shortstat: bool, + flag_summary: bool, + flag_find_renames: Option, + flag_find_copies: Option, + flag_find_copies_harder: bool, + flag_break_rewrites: bool, + flag_unified: Option, + flag_inter_hunk_context: Option, + flag_abbrev: Option, + flag_src_prefix: Option, + flag_dst_prefix: Option, + flag_git_dir: Option, +} + +const RESET: &'static str = "\u{1b}[m"; +const BOLD: &'static str = "\u{1b}[1m"; +const RED: &'static str = "\u{1b}[31m"; +const GREEN: &'static str = "\u{1b}[32m"; +const CYAN: &'static str = "\u{1b}[36m"; + +#[derive(PartialEq, Eq, Copy, Clone)] +enum Cache { Normal, Only, None } + +fn run(args: &Args) -> Result<(), Error> { + let path = args.flag_git_dir.as_ref().map(|s| &s[..]).unwrap_or("."); + let repo = try!(Repository::open(path)); + + // Prepare our diff options based on the arguments given 
+ let mut opts = DiffOptions::new(); + opts.reverse(args.flag_R) + .force_text(args.flag_text) + .ignore_whitespace_eol(args.flag_ignore_space_at_eol) + .ignore_whitespace_change(args.flag_ignore_space_change) + .ignore_whitespace(args.flag_ignore_all_space) + .include_ignored(args.flag_ignored) + .include_untracked(args.flag_untracked) + .patience(args.flag_patience) + .minimal(args.flag_minimal); + if let Some(amt) = args.flag_unified { opts.context_lines(amt); } + if let Some(amt) = args.flag_inter_hunk_context { opts.interhunk_lines(amt); } + if let Some(amt) = args.flag_abbrev { opts.id_abbrev(amt); } + if let Some(ref s) = args.flag_src_prefix { opts.old_prefix(&s); } + if let Some(ref s) = args.flag_dst_prefix { opts.new_prefix(&s); } + if let Some("diff-index") = args.flag_format.as_ref().map(|s| &s[..]) { + opts.id_abbrev(40); + } + + // Prepare the diff to inspect + let t1 = try!(tree_to_treeish(&repo, args.arg_from_oid.as_ref())); + let t2 = try!(tree_to_treeish(&repo, args.arg_to_oid.as_ref())); + let head = try!(tree_to_treeish(&repo, Some(&"HEAD".to_string()))).unwrap(); + let mut diff = match (t1, t2, args.cache()) { + (Some(t1), Some(t2), _) => { + try!(repo.diff_tree_to_tree(t1.as_tree(), t2.as_tree(), + Some(&mut opts))) + } + (t1, None, Cache::None) => { + let t1 = t1.unwrap_or(head); + try!(repo.diff_tree_to_workdir(t1.as_tree(), Some(&mut opts))) + } + (t1, None, Cache::Only) => { + let t1 = t1.unwrap_or(head); + try!(repo.diff_tree_to_index(t1.as_tree(), None, Some(&mut opts))) + } + (Some(t1), None, _) => { + try!(repo.diff_tree_to_workdir_with_index(t1.as_tree(), + Some(&mut opts))) + } + (None, None, _) => { + try!(repo.diff_index_to_workdir(None, Some(&mut opts))) + } + (None, Some(_), _) => unreachable!(), + }; + + // Apply rename and copy detection if requested + if args.flag_break_rewrites || args.flag_find_copies_harder || + args.flag_find_renames.is_some() || args.flag_find_copies.is_some() + { + let mut opts = DiffFindOptions::new(); 
+ if let Some(t) = args.flag_find_renames { + opts.rename_threshold(t); + opts.renames(true); + } + if let Some(t) = args.flag_find_copies { + opts.copy_threshold(t); + opts.copies(true); + } + opts.copies_from_unmodified(args.flag_find_copies_harder) + .rewrites(args.flag_break_rewrites); + try!(diff.find_similar(Some(&mut opts))); + } + + // Generate simple output + let stats = args.flag_stat | args.flag_numstat | args.flag_shortstat | + args.flag_summary; + if stats { + try!(print_stats(&diff, args)); + } + if args.flag_patch || !stats { + if args.color() { print!("{}", RESET); } + let mut last_color = None; + try!(diff.print(args.diff_format(), |_delta, _hunk, line| { + if args.color() { + let next = match line.origin() { + '+' => Some(GREEN), + '-' => Some(RED), + '>' => Some(GREEN), + '<' => Some(RED), + 'F' => Some(BOLD), + 'H' => Some(CYAN), + _ => None + }; + if args.color() && next != last_color { + if last_color == Some(BOLD) || next == Some(BOLD) { + print!("{}", RESET); + } + print!("{}", next.unwrap_or(RESET)); + last_color = next; + } + } + + match line.origin() { + '+' | '-' | ' ' => print!("{}", line.origin()), + _ => {} + } + print!("{}", str::from_utf8(line.content()).unwrap()); + true + })); + if args.color() { print!("{}", RESET); } + } + + Ok(()) +} + +fn print_stats(diff: &Diff, args: &Args) -> Result<(), Error> { + let stats = try!(diff.stats()); + let mut format = git2::DIFF_STATS_NONE; + if args.flag_stat { + format = format | git2::DIFF_STATS_FULL; + } + if args.flag_shortstat { + format = format | git2::DIFF_STATS_SHORT; + } + if args.flag_numstat { + format = format | git2::DIFF_STATS_NUMBER; + } + if args.flag_summary { + format = format | git2::DIFF_STATS_INCLUDE_SUMMARY; + } + let buf = try!(stats.to_buf(format, 80)); + print!("{}", str::from_utf8(&*buf).unwrap()); + Ok(()) +} + +fn tree_to_treeish<'a>(repo: &'a Repository, arg: Option<&String>) + -> Result>, Error> { + let arg = match arg { Some(s) => s, None => return Ok(None) }; + 
let obj = try!(repo.revparse_single(arg)); + let tree = try!(obj.peel(ObjectType::Tree)); + Ok(Some(tree)) +} + +impl Args { + fn cache(&self) -> Cache { + if self.flag_cached {Cache::Only} + else if self.flag_nocached {Cache::None} + else {Cache::Normal} + } + fn color(&self) -> bool { self.flag_color && !self.flag_no_color } + fn diff_format(&self) -> DiffFormat { + if self.flag_patch {DiffFormat::Patch} + else if self.flag_name_only {DiffFormat::NameOnly} + else if self.flag_name_status {DiffFormat::NameStatus} + else if self.flag_raw {DiffFormat::Raw} + else { + match self.flag_format.as_ref().map(|s| &s[..]) { + Some("name") => DiffFormat::NameOnly, + Some("name-status") => DiffFormat::NameStatus, + Some("raw") => DiffFormat::Raw, + Some("diff-index") => DiffFormat::Raw, + _ => DiffFormat::Patch, + } + } + } +} + +fn main() { + const USAGE: &'static str = " +usage: diff [options] [ []] + +Options: + -p, --patch show output in patch format + --cached use staged changes as diff + --nocached do not use staged changes + --name-only show only names of changed files + --name-status show only names and status changes + --raw generate the raw format + --format= specify format for stat summary + --color use color output + --no-color never use color output + -R swap two inputs + -a, --text treat all files as text + --ignore-space-at-eol ignore changes in whitespace at EOL + -b, --ignore-space-change ignore changes in amount of whitespace + -w, --ignore-all-space ignore whitespace when comparing lines + --ignored show ignored files as well + --untracked show untracked files + --patience generate diff using the patience algorithm + --minimal spend extra time to find smallest diff + --stat generate a diffstat + --numstat similar to --stat, but more machine friendly + --shortstat only output last line of --stat + --summary output condensed summary of header info + -M, --find-renames set threshold for findind renames (default 50) + -C, --find-copies set threshold for finding 
copies (default 50) + --find-copies-harder inspect unmodified files for sources of copies + -B, --break-rewrites break complete rewrite changes into pairs + -U, --unified lints of context to show + --inter-hunk-context maximum lines of change between hunks + --abbrev length to abbreviate commits to + --src-prefix show given source prefix instead of 'a/' + --dst-prefix show given destinction prefix instead of 'b/' + --git-dir path to git repository to use + -h, --help show this message +"; + + let args = Docopt::new(USAGE).and_then(|d| d.decode()) + .unwrap_or_else(|e| e.exit()); + match run(&args) { + Ok(()) => {} + Err(e) => println!("error: {}", e), + } +} diff -Nru cargo-0.17.0/vendor/git2-0.6.4/examples/fetch.rs cargo-0.19.0/vendor/git2-0.6.4/examples/fetch.rs --- cargo-0.17.0/vendor/git2-0.6.4/examples/fetch.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/git2-0.6.4/examples/fetch.rs 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,131 @@ +/* + * libgit2 "fetch" example - shows how to fetch remote data + * + * Written by the libgit2 contributors + * + * To the extent possible under law, the author(s) have dedicated all copyright + * and related and neighboring rights to this software to the public domain + * worldwide. This software is distributed without any warranty. + * + * You should have received a copy of the CC0 Public Domain Dedication along + * with this software. If not, see + * . 
+ */ + +#![deny(warnings)] + +extern crate git2; +extern crate docopt; +extern crate rustc_serialize; + +use docopt::Docopt; +use git2::{Repository, RemoteCallbacks, Direction, AutotagOption, FetchOptions}; +use std::io::{self, Write}; +use std::str; + +#[derive(RustcDecodable)] +struct Args { + arg_remote: Option, +} + +fn run(args: &Args) -> Result<(), git2::Error> { + let repo = try!(Repository::open(".")); + let remote = args.arg_remote.as_ref().map(|s| &s[..]).unwrap_or("origin"); + + // Figure out whether it's a named remote or a URL + println!("Fetching {} for repo", remote); + let mut cb = RemoteCallbacks::new(); + let mut remote = try!(repo.find_remote(remote).or_else(|_| { + repo.remote_anonymous(remote) + })); + cb.sideband_progress(|data| { + print!("remote: {}", str::from_utf8(data).unwrap()); + io::stdout().flush().unwrap(); + true + }); + + // This callback gets called for each remote-tracking branch that gets + // updated. The message we output depends on whether it's a new one or an + // update. + cb.update_tips(|refname, a, b| { + if a.is_zero() { + println!("[new] {:20} {}", b, refname); + } else { + println!("[updated] {:10}..{:10} {}", a, b, refname); + } + true + }); + + // Here we show processed and total objects in the pack and the amount of + // received data. Most frontends will probably want to show a percentage and + // the download rate. + cb.transfer_progress(|stats| { + if stats.received_objects() == stats.total_objects() { + print!("Resolving deltas {}/{}\r", stats.indexed_deltas(), + stats.total_deltas()); + } else if stats.total_objects() > 0 { + print!("Received {}/{} objects ({}) in {} bytes\r", + stats.received_objects(), + stats.total_objects(), + stats.indexed_objects(), + stats.received_bytes()); + } + io::stdout().flush().unwrap(); + true + }); + + // Connect to the remote end specifying that we want to fetch information + // from it. + try!(remote.connect(Direction::Fetch)); + + // Download the packfile and index it. 
This function updates the amount of + // received data and the indexer stats which lets you inform the user about + // progress. + let mut fo = FetchOptions::new(); + fo.remote_callbacks(cb); + try!(remote.download(&[], Some(&mut fo))); + + { + // If there are local objects (we got a thin pack), then tell the user + // how many objects we saved from having to cross the network. + let stats = remote.stats(); + if stats.local_objects() > 0 { + println!("\rReceived {}/{} objects in {} bytes (used {} local \ + objects)", stats.indexed_objects(), + stats.total_objects(), stats.received_bytes(), + stats.local_objects()); + } else { + println!("\rReceived {}/{} objects in {} bytes", + stats.indexed_objects(), stats.total_objects(), + stats.received_bytes()); + } + } + + // Disconnect the underlying connection to prevent from idling. + remote.disconnect(); + + // Update the references in the remote's namespace to point to the right + // commits. This may be needed even if there was no packfile to download, + // which can happen e.g. when the branches have been changed but all the + // needed objects are available locally. 
+ try!(remote.update_tips(None, true, + AutotagOption::Unspecified, None)); + + Ok(()) +} + +fn main() { + const USAGE: &'static str = " +usage: fetch [options] [] + +Options: + -h, --help show this message +"; + + let args = Docopt::new(USAGE).and_then(|d| d.decode()) + .unwrap_or_else(|e| e.exit()); + match run(&args) { + Ok(()) => {} + Err(e) => println!("error: {}", e), + } +} diff -Nru cargo-0.17.0/vendor/git2-0.6.4/examples/init.rs cargo-0.19.0/vendor/git2-0.6.4/examples/init.rs --- cargo-0.17.0/vendor/git2-0.6.4/examples/init.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/git2-0.6.4/examples/init.rs 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,151 @@ +/* + * libgit2 "init" example - shows how to initialize a new repo + * + * Written by the libgit2 contributors + * + * To the extent possible under law, the author(s) have dedicated all copyright + * and related and neighboring rights to this software to the public domain + * worldwide. This software is distributed without any warranty. + * + * You should have received a copy of the CC0 Public Domain Dedication along + * with this software. If not, see + * . 
+ */ + +#![deny(warnings)] + +extern crate git2; +extern crate docopt; +extern crate rustc_serialize; + +use docopt::Docopt; +use git2::{Repository, RepositoryInitOptions, RepositoryInitMode, Error}; +use std::path::{PathBuf, Path}; + +#[derive(RustcDecodable)] +struct Args { + arg_directory: String, + flag_quiet: bool, + flag_bare: bool, + flag_template: Option, + flag_separate_git_dir: Option, + flag_initial_commit: bool, + flag_shared: Option, +} + +fn run(args: &Args) -> Result<(), Error> { + let mut path = PathBuf::from(&args.arg_directory); + let repo = if !args.flag_bare && args.flag_template.is_none() && + args.flag_shared.is_none() && + args.flag_separate_git_dir.is_none() { + try!(Repository::init(&path)) + } else { + let mut opts = RepositoryInitOptions::new(); + opts.bare(args.flag_bare); + if let Some(ref s) = args.flag_template { + opts.template_path(Path::new(s)); + } + + // If you specified a separate git directory, then initialize + // the repository at that path and use the second path as the + // working directory of the repository (with a git-link file) + if let Some(ref s) = args.flag_separate_git_dir { + opts.workdir_path(&path); + path = PathBuf::from(s); + } + + if let Some(ref s) = args.flag_shared { + opts.mode(try!(parse_shared(&s))); + } + try!(Repository::init_opts(&path, &opts)) + }; + + // Print a message to stdout like "git init" does + if !args.flag_quiet { + if args.flag_bare || args.flag_separate_git_dir.is_some() { + path = repo.path().to_path_buf(); + } else { + path = repo.workdir().unwrap().to_path_buf(); + } + println!("Initialized empty Git repository in {}", path.display()); + } + + if args.flag_initial_commit { + try!(create_initial_commit(&repo)); + println!("Created empty initial commit"); + } + + Ok(()) +} + +/// Unlike regular "git init", this example shows how to create an initial empty +/// commit in the repository. This is the helper function that does that. 
+fn create_initial_commit(repo: &Repository) -> Result<(), Error> { + // First use the config to initialize a commit signature for the user. + let sig = try!(repo.signature()); + + // Now let's create an empty tree for this commit + let tree_id = { + let mut index = try!(repo.index()); + + // Outside of this example, you could call index.add_path() + // here to put actual files into the index. For our purposes, we'll + // leave it empty for now. + + try!(index.write_tree()) + }; + + let tree = try!(repo.find_tree(tree_id)); + + // Ready to create the initial commit. + // + // Normally creating a commit would involve looking up the current HEAD + // commit and making that be the parent of the initial commit, but here this + // is the first commit so there will be no parent. + try!(repo.commit(Some("HEAD"), &sig, &sig, "Initial commit", &tree, &[])); + + Ok(()) +} + +fn parse_shared(shared: &str) -> Result { + match shared { + "false" | "umask" => Ok(git2::REPOSITORY_INIT_SHARED_UMASK), + "true" | "group" => Ok(git2::REPOSITORY_INIT_SHARED_GROUP), + "all" | "world" => Ok(git2::REPOSITORY_INIT_SHARED_ALL), + _ => { + if shared.starts_with("0") { + match u32::from_str_radix(&shared[1..], 8).ok() { + Some(n) => { + return Ok(RepositoryInitMode::from_bits_truncate(n)) + } + None => { + Err(Error::from_str("invalid octal value for --shared")) + } + } + } else { + Err(Error::from_str("unknown value for --shared")) + } + } + } +} + +fn main() { + const USAGE: &'static str = " +usage: init [options] + +Options: + -q, --quiet don't print information to stdout + --bare initialize a new bare repository + --template use as an initialization template + --separate-git-dir use as the .git directory + --initial-commit create an initial empty commit + --shared permissions to create the repository with +"; + + let args = Docopt::new(USAGE).and_then(|d| d.decode()) + .unwrap_or_else(|e| e.exit()); + match run(&args) { + Ok(()) => {} + Err(e) => println!("error: {}", e), + } +} diff 
-Nru cargo-0.17.0/vendor/git2-0.6.4/examples/log.rs cargo-0.19.0/vendor/git2-0.6.4/examples/log.rs --- cargo-0.17.0/vendor/git2-0.6.4/examples/log.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/git2-0.6.4/examples/log.rs 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,262 @@ +/* + * libgit2 "log" example - shows how to walk history and get commit info + * + * Written by the libgit2 contributors + * + * To the extent possible under law, the author(s) have dedicated all copyright + * and related and neighboring rights to this software to the public domain + * worldwide. This software is distributed without any warranty. + * + * You should have received a copy of the CC0 Public Domain Dedication along + * with this software. If not, see + * . + */ + +#![deny(warnings)] + +extern crate rustc_serialize; +extern crate docopt; +extern crate git2; +extern crate time; + +use std::str; +use docopt::Docopt; +use git2::{Repository, Signature, Commit, ObjectType, Time, DiffOptions}; +use git2::{Pathspec, Error, DiffFormat}; + +#[derive(RustcDecodable)] +struct Args { + arg_commit: Vec, + arg_spec: Vec, + flag_topo_order: bool, + flag_date_order: bool, + flag_reverse: bool, + flag_author: Option, + flag_committer: Option, + flag_grep: Option, + flag_git_dir: Option, + flag_skip: Option, + flag_max_count: Option, + flag_merges: bool, + flag_no_merges: bool, + flag_no_min_parents: bool, + flag_no_max_parents: bool, + flag_max_parents: Option, + flag_min_parents: Option, + flag_patch: bool, +} + +fn run(args: &Args) -> Result<(), Error> { + let path = args.flag_git_dir.as_ref().map(|s| &s[..]).unwrap_or("."); + let repo = try!(Repository::open(path)); + let mut revwalk = try!(repo.revwalk()); + + // Prepare the revwalk based on CLI parameters + let base = if args.flag_reverse {git2::SORT_REVERSE} else {git2::SORT_NONE}; + revwalk.set_sorting(base | if args.flag_topo_order { + git2::SORT_TOPOLOGICAL + } else if args.flag_date_order { + git2::SORT_TIME + } else { + 
git2::SORT_NONE + }); + for commit in args.arg_commit.iter() { + if commit.starts_with("^") { + let obj = try!(repo.revparse_single(&commit[1..])); + try!(revwalk.hide(obj.id())); + continue + } + let revspec = try!(repo.revparse(&commit)); + if revspec.mode().contains(git2::REVPARSE_SINGLE) { + try!(revwalk.push(revspec.from().unwrap().id())); + } else { + let from = revspec.from().unwrap().id(); + let to = revspec.to().unwrap().id(); + try!(revwalk.push(to)); + if revspec.mode().contains(git2::REVPARSE_MERGE_BASE) { + let base = try!(repo.merge_base(from, to)); + let o = try!(repo.find_object(base, Some(ObjectType::Commit))); + try!(revwalk.push(o.id())); + } + try!(revwalk.hide(from)); + } + } + if args.arg_commit.len() == 0 { + try!(revwalk.push_head()); + } + + // Prepare our diff options and pathspec matcher + let (mut diffopts, mut diffopts2) = (DiffOptions::new(), DiffOptions::new()); + for spec in args.arg_spec.iter() { + diffopts.pathspec(spec); + diffopts2.pathspec(spec); + } + let ps = try!(Pathspec::new(args.arg_spec.iter())); + + // Filter our revwalk based on the CLI parameters + macro_rules! 
filter_try { + ($e:expr) => (match $e { Ok(t) => t, Err(e) => return Some(Err(e)) }) + } + let revwalk = revwalk.filter_map(|id| { + let id = filter_try!(id); + let commit = filter_try!(repo.find_commit(id)); + let parents = commit.parents().len(); + if parents < args.min_parents() { return None } + if let Some(n) = args.max_parents() { + if parents >= n { return None } + } + if args.arg_spec.len() > 0 { + match commit.parents().len() { + 0 => { + let tree = filter_try!(commit.tree()); + let flags = git2::PATHSPEC_NO_MATCH_ERROR; + if ps.match_tree(&tree, flags).is_err() { return None } + } + _ => { + let m = commit.parents().all(|parent| { + match_with_parent(&repo, &commit, &parent, &mut diffopts) + .unwrap_or(false) + }); + if !m { return None } + } + } + } + if !sig_matches(commit.author(), &args.flag_author) { return None } + if !sig_matches(commit.committer(), &args.flag_committer) { return None } + if !log_message_matches(commit.message(), &args.flag_grep) { return None } + Some(Ok(commit)) + }).skip(args.flag_skip.unwrap_or(0)).take(args.flag_max_count.unwrap_or(!0)); + + // print! 
+ for commit in revwalk { + let commit = try!(commit); + print_commit(&commit); + if !args.flag_patch || commit.parents().len() > 1 { continue } + let a = if commit.parents().len() == 1 { + let parent = try!(commit.parent(0)); + Some(try!(parent.tree())) + } else { + None + }; + let b = try!(commit.tree()); + let diff = try!(repo.diff_tree_to_tree(a.as_ref(), Some(&b), + Some(&mut diffopts2))); + try!(diff.print(DiffFormat::Patch, |_delta, _hunk, line| { + match line.origin() { + ' ' | '+' | '-' => print!("{}", line.origin()), + _ => {} + } + print!("{}", str::from_utf8(line.content()).unwrap()); + true + })); + } + + Ok(()) +} + +fn sig_matches(sig: Signature, arg: &Option) -> bool { + match *arg { + Some(ref s) => { + sig.name().map(|n| n.contains(s)).unwrap_or(false) || + sig.email().map(|n| n.contains(s)).unwrap_or(false) + } + None => true + } +} + +fn log_message_matches(msg: Option<&str>, grep: &Option) -> bool { + match (grep, msg) { + (&None, _) => true, + (&Some(_), None) => false, + (&Some(ref s), Some(msg)) => msg.contains(s), + } +} + +fn print_commit(commit: &Commit) { + println!("commit {}", commit.id()); + + if commit.parents().len() > 1 { + print!("Merge:"); + for id in commit.parent_ids() { + print!(" {:.8}", id); + } + println!(""); + } + + let author = commit.author(); + println!("Author: {}", author); + print_time(&author.when(), "Date: "); + println!(""); + + for line in String::from_utf8_lossy(commit.message_bytes()).lines() { + println!(" {}", line); + } + println!(""); +} + +fn print_time(time: &Time, prefix: &str) { + let (offset, sign) = match time.offset_minutes() { + n if n < 0 => (-n, '-'), + n => (n, '+'), + }; + let (hours, minutes) = (offset / 60, offset % 60); + let ts = time::Timespec::new(time.seconds() + + (time.offset_minutes() as i64) * 60, 0); + let time = time::at(ts); + + println!("{}{} {}{:02}{:02}", prefix, + time.strftime("%a %b %e %T %Y").unwrap(), sign, hours, minutes); + +} + +fn match_with_parent(repo: &Repository, 
commit: &Commit, parent: &Commit, + opts: &mut DiffOptions) -> Result { + let a = try!(parent.tree()); + let b = try!(commit.tree()); + let diff = try!(repo.diff_tree_to_tree(Some(&a), Some(&b), Some(opts))); + Ok(diff.deltas().len() > 0) +} + +impl Args { + fn min_parents(&self) -> usize { + if self.flag_no_min_parents { return 0 } + self.flag_min_parents.unwrap_or(if self.flag_merges {2} else {0}) + } + + fn max_parents(&self) -> Option { + if self.flag_no_max_parents { return None } + self.flag_max_parents.or(if self.flag_no_merges {Some(1)} else {None}) + } +} + +fn main() { + const USAGE: &'static str = " +usage: log [options] [..] [--] [..] + +Options: + --topo-order sort commits in topological order + --date-order sort commits in date order + --reverse sort commits in reverse + --author author to sort by + --committer committer to sort by + --grep pattern to filter commit messages by + --git-dir alternative git directory to use + --skip number of commits to skip + -n, --max-count maximum number of commits to show + --merges only show merge commits + --no-merges don't show merge commits + --no-min-parents don't require a minimum number of parents + --no-max-parents don't require a maximum number of parents + --max-parents specify a maximum number of parents for a commit + --min-parents specify a minimum number of parents for a commit + -p, --patch show commit diff + -h, --help show this message +"; + + let args = Docopt::new(USAGE).and_then(|d| d.decode()) + .unwrap_or_else(|e| e.exit()); + match run(&args) { + Ok(()) => {} + Err(e) => println!("error: {}", e), + } +} diff -Nru cargo-0.17.0/vendor/git2-0.6.4/examples/ls-remote.rs cargo-0.19.0/vendor/git2-0.6.4/examples/ls-remote.rs --- cargo-0.17.0/vendor/git2-0.6.4/examples/ls-remote.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/git2-0.6.4/examples/ls-remote.rs 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,63 @@ +/* + * libgit2 "ls-remote" example + * + * Written by the libgit2 contributors 
+ * + * To the extent possible under law, the author(s) have dedicated all copyright + * and related and neighboring rights to this software to the public domain + * worldwide. This software is distributed without any warranty. + * + * You should have received a copy of the CC0 Public Domain Dedication along + * with this software. If not, see + * . + */ + +#![deny(warnings)] + +extern crate git2; +extern crate docopt; +extern crate rustc_serialize; + +use docopt::Docopt; +use git2::{Repository, Direction}; + +#[derive(RustcDecodable)] +struct Args { + arg_remote: String, +} + +fn run(args: &Args) -> Result<(), git2::Error> { + let repo = try!(Repository::open(".")); + let remote = &args.arg_remote; + let mut remote = try!(repo.find_remote(remote).or_else(|_| { + repo.remote_anonymous(remote) + })); + + // Connect to the remote and call the printing function for each of the + // remote references. + try!(remote.connect(Direction::Fetch)); + + // Get the list of references on the remote and print out their name next to + // what they point to. 
+ for head in try!(remote.list()).iter() { + println!("{}\t{}", head.oid(), head.name()); + } + + Ok(()) +} + +fn main() { + const USAGE: &'static str = " +usage: ls-remote [option] + +Options: + -h, --help show this message +"; + + let args = Docopt::new(USAGE).and_then(|d| d.decode()) + .unwrap_or_else(|e| e.exit()); + match run(&args) { + Ok(()) => {} + Err(e) => println!("error: {}", e), + } +} diff -Nru cargo-0.17.0/vendor/git2-0.6.4/examples/rev-list.rs cargo-0.19.0/vendor/git2-0.6.4/examples/rev-list.rs --- cargo-0.17.0/vendor/git2-0.6.4/examples/rev-list.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/git2-0.6.4/examples/rev-list.rs 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,96 @@ +/* + * libgit2 "rev-list" example - shows how to transform a rev-spec into a list + * of commit ids + * + * Written by the libgit2 contributors + * + * To the extent possible under law, the author(s) have dedicated all copyright + * and related and neighboring rights to this software to the public domain + * worldwide. This software is distributed without any warranty. + * + * You should have received a copy of the CC0 Public Domain Dedication along + * with this software. If not, see + * . 
+ */ + +#![deny(warnings)] + +extern crate git2; +extern crate docopt; +extern crate rustc_serialize; + +use docopt::Docopt; +use git2::{Repository, Error, Revwalk, Oid}; + +#[derive(RustcDecodable)] +struct Args { + arg_spec: Vec, + flag_topo_order: bool, + flag_date_order: bool, + flag_reverse: bool, + flag_not: Vec, +} + +fn run(args: &Args) -> Result<(), git2::Error> { + let repo = try!(Repository::open(".")); + let mut revwalk = try!(repo.revwalk()); + + let base = if args.flag_reverse {git2::SORT_REVERSE} else {git2::SORT_NONE}; + revwalk.set_sorting(base | if args.flag_topo_order { + git2::SORT_TOPOLOGICAL + } else if args.flag_date_order { + git2::SORT_TIME + } else { + git2::SORT_NONE + }); + + let specs = args.flag_not.iter().map(|s| (s, true)) + .chain(args.arg_spec.iter().map(|s| (s, false))) + .map(|(spec, hide)| { + if spec.starts_with("^") {(&spec[1..], !hide)} else {(&spec[..], hide)} + }); + for (spec, hide) in specs { + let id = if spec.contains("..") { + let revspec = try!(repo.revparse(spec)); + if revspec.mode().contains(git2::REVPARSE_MERGE_BASE) { + return Err(Error::from_str("merge bases not implemented")) + } + try!(push(&mut revwalk, revspec.from().unwrap().id(), !hide)); + revspec.to().unwrap().id() + } else { + try!(repo.revparse_single(spec)).id() + }; + try!(push(&mut revwalk, id, hide)); + } + + for id in revwalk { + let id = try!(id); + println!("{}", id); + } + Ok(()) +} + +fn push(revwalk: &mut Revwalk, id: Oid, hide: bool) -> Result<(), Error> { + if hide {revwalk.hide(id)} else {revwalk.push(id)} +} + +fn main() { + const USAGE: &'static str = " +usage: rev-list [options] [--] ... 
+ +Options: + --topo-order sort commits in topological order + --date-order sort commits in date order + --reverse sort commits in reverse + --not don't show + -h, --help show this message +"; + + let args = Docopt::new(USAGE).and_then(|d| d.decode()) + .unwrap_or_else(|e| e.exit()); + match run(&args) { + Ok(()) => {} + Err(e) => println!("error: {}", e), + } +} + diff -Nru cargo-0.17.0/vendor/git2-0.6.4/examples/rev-parse.rs cargo-0.19.0/vendor/git2-0.6.4/examples/rev-parse.rs --- cargo-0.17.0/vendor/git2-0.6.4/examples/rev-parse.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/git2-0.6.4/examples/rev-parse.rs 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,69 @@ +/* + * libgit2 "rev-parse" example - shows how to parse revspecs + * + * Written by the libgit2 contributors + * + * To the extent possible under law, the author(s) have dedicated all copyright + * and related and neighboring rights to this software to the public domain + * worldwide. This software is distributed without any warranty. + * + * You should have received a copy of the CC0 Public Domain Dedication along + * with this software. If not, see + * . 
+ */ + +#![deny(warnings)] + +extern crate git2; +extern crate docopt; +extern crate rustc_serialize; + +use docopt::Docopt; +use git2::Repository; + +#[derive(RustcDecodable)] +struct Args { + arg_spec: String, + flag_git_dir: Option, +} + +fn run(args: &Args) -> Result<(), git2::Error> { + let path = args.flag_git_dir.as_ref().map(|s| &s[..]).unwrap_or("."); + let repo = try!(Repository::open(path)); + + let revspec = try!(repo.revparse(&args.arg_spec)); + + if revspec.mode().contains(git2::REVPARSE_SINGLE) { + println!("{}", revspec.from().unwrap().id()); + } else if revspec.mode().contains(git2::REVPARSE_RANGE) { + let to = revspec.to().unwrap(); + let from = revspec.from().unwrap(); + println!("{}", to.id()); + + if revspec.mode().contains(git2::REVPARSE_MERGE_BASE) { + let base = try!(repo.merge_base(from.id(), to.id())); + println!("{}", base); + } + + println!("^{}", from.id()); + } else { + return Err(git2::Error::from_str("invalid results from revparse")) + } + Ok(()) +} + +fn main() { + const USAGE: &'static str = " +usage: rev-parse [options] + +Options: + --git-dir directory for the git repository to check +"; + + let args = Docopt::new(USAGE).and_then(|d| d.decode()) + .unwrap_or_else(|e| e.exit()); + match run(&args) { + Ok(()) => {} + Err(e) => println!("error: {}", e), + } +} diff -Nru cargo-0.17.0/vendor/git2-0.6.4/examples/status.rs cargo-0.19.0/vendor/git2-0.6.4/examples/status.rs --- cargo-0.17.0/vendor/git2-0.6.4/examples/status.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/git2-0.6.4/examples/status.rs 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,373 @@ +/* + * libgit2 "status" example - shows how to use the status APIs + * + * Written by the libgit2 contributors + * + * To the extent possible under law, the author(s) have dedicated all copyright + * and related and neighboring rights to this software to the public domain + * worldwide. This software is distributed without any warranty. 
+ * + * You should have received a copy of the CC0 Public Domain Dedication along + * with this software. If not, see + * . + */ + +#![deny(warnings)] + +extern crate git2; +extern crate docopt; +extern crate rustc_serialize; + +use std::str; +use std::time::Duration; +use docopt::Docopt; +use git2::{Repository, Error, StatusOptions, ErrorCode, SubmoduleIgnore}; + +#[derive(RustcDecodable)] +struct Args { + arg_spec: Vec, + flag_short: bool, + flag_long: bool, + flag_porcelain: bool, + flag_branch: bool, + flag_z: bool, + flag_ignored: bool, + flag_untracked_files: Option, + flag_ignore_submodules: Option, + flag_git_dir: Option, + flag_repeat: bool, + flag_list_submodules: bool, +} + +#[derive(Eq, PartialEq)] +enum Format { Long, Short, Porcelain } + +fn run(args: &Args) -> Result<(), Error> { + let path = args.flag_git_dir.clone().unwrap_or(".".to_string()); + let repo = try!(Repository::open(&path)); + if repo.is_bare() { + return Err(Error::from_str("cannot report status on bare repository")) + } + + let mut opts = StatusOptions::new(); + opts.include_ignored(args.flag_ignored); + match args.flag_untracked_files.as_ref().map(|s| &s[..]) { + Some("no") => { opts.include_untracked(false); } + Some("normal") => { opts.include_untracked(true); } + Some("all") => { + opts.include_untracked(true).recurse_untracked_dirs(true); + } + Some(_) => return Err(Error::from_str("invalid untracked-files value")), + None => {} + } + match args.flag_ignore_submodules.as_ref().map(|s| &s[..]) { + Some("all") => { opts.exclude_submodules(true); } + Some(_) => return Err(Error::from_str("invalid ignore-submodules value")), + None => {} + } + opts.include_untracked(!args.flag_ignored); + for spec in args.arg_spec.iter() { + opts.pathspec(spec); + } + + loop { + if args.flag_repeat { + println!("\u{1b}[H\u{1b}[2J"); + } + + let statuses = try!(repo.statuses(Some(&mut opts))); + + if args.flag_branch { + try!(show_branch(&repo, args.format())); + } + if args.flag_list_submodules { + 
try!(print_submodules(&repo)); + } + + if args.format() == Format::Long { + print_long(statuses); + } else { + print_short(&repo, statuses); + } + + if args.flag_repeat { + std::thread::sleep(Duration::new(10, 0)); + } else { + return Ok(()) + } + } +} + +fn show_branch(repo: &Repository, format: Format) -> Result<(), Error> { + let head = match repo.head() { + Ok(head) => Some(head), + Err(ref e) if e.code() == ErrorCode::UnbornBranch || + e.code() == ErrorCode::NotFound => None, + Err(e) => return Err(e), + }; + let head = head.as_ref().and_then(|h| h.shorthand()); + + if format == Format::Long { + println!("# On branch {}", + head.unwrap_or("Not currently on any branch")); + } else { + println!("## {}", head.unwrap_or("HEAD (no branch)")); + } + Ok(()) +} + +fn print_submodules(repo: &Repository) -> Result<(), Error> { + let modules = try!(repo.submodules()); + println!("# Submodules"); + for sm in modules.iter() { + println!("# - submodule '{}' at {}", sm.name().unwrap(), + sm.path().display()); + } + Ok(()) +} + +// This function print out an output similar to git's status command in long +// form, including the command-line hints. 
+fn print_long(statuses: git2::Statuses) { + let mut header = false; + let mut rm_in_workdir = false; + let mut changes_in_index = false; + let mut changed_in_workdir = false; + + // Print index changes + for entry in statuses.iter().filter(|e| e.status() != git2::STATUS_CURRENT) { + if entry.status().contains(git2::STATUS_WT_DELETED) { + rm_in_workdir = true; + } + let istatus = match entry.status() { + s if s.contains(git2::STATUS_INDEX_NEW) => "new file: ", + s if s.contains(git2::STATUS_INDEX_MODIFIED) => "modified: ", + s if s.contains(git2::STATUS_INDEX_DELETED) => "deleted: ", + s if s.contains(git2::STATUS_INDEX_RENAMED) => "renamed: ", + s if s.contains(git2::STATUS_INDEX_TYPECHANGE) => "typechange:", + _ => continue, + }; + if !header { + println!("\ +# Changes to be committed: +# (use \"git reset HEAD ...\" to unstage) +#"); + header = true; + } + + let old_path = entry.head_to_index().unwrap().old_file().path(); + let new_path = entry.head_to_index().unwrap().new_file().path(); + match (old_path, new_path) { + (Some(ref old), Some(ref new)) if old != new => { + println!("#\t{} {} -> {}", istatus, old.display(), + new.display()); + } + (old, new) => { + println!("#\t{} {}", istatus, old.or(new).unwrap().display()); + } + } + } + + if header { + changes_in_index = true; + println!("#"); + } + header = false; + + // Print workdir changes to tracked files + for entry in statuses.iter() { + // With `STATUS_OPT_INCLUDE_UNMODIFIED` (not used in this example) + // `index_to_workdir` may not be `None` even if there are no differences, + // in which case it will be a `Delta::Unmodified`. 
+ if entry.status() == git2::STATUS_CURRENT || + entry.index_to_workdir().is_none() { + continue + } + + let istatus = match entry.status() { + s if s.contains(git2::STATUS_WT_MODIFIED) => "modified: ", + s if s.contains(git2::STATUS_WT_DELETED) => "deleted: ", + s if s.contains(git2::STATUS_WT_RENAMED) => "renamed: ", + s if s.contains(git2::STATUS_WT_TYPECHANGE) => "typechange:", + _ => continue, + }; + + if !header { + println!("\ +# Changes not staged for commit: +# (use \"git add{} ...\" to update what will be committed) +# (use \"git checkout -- ...\" to discard changes in working directory) +#\ + ", if rm_in_workdir {"/rm"} else {""}); + header = true; + } + + let old_path = entry.index_to_workdir().unwrap().old_file().path(); + let new_path = entry.index_to_workdir().unwrap().new_file().path(); + match (old_path, new_path) { + (Some(ref old), Some(ref new)) if old != new => { + println!("#\t{} {} -> {}", istatus, old.display(), + new.display()); + } + (old, new) => { + println!("#\t{} {}", istatus, old.or(new).unwrap().display()); + } + } + } + + if header { + changed_in_workdir = true; + println!("#"); + } + header = false; + + // Print untracked files + for entry in statuses.iter().filter(|e| e.status() == git2::STATUS_WT_NEW) { + if !header { + println!("\ +# Untracked files +# (use \"git add ...\" to include in what will be committed) +#"); + header = true; + } + let file = entry.index_to_workdir().unwrap().old_file().path().unwrap(); + println!("#\t{}", file.display()); + } + header = false; + + // Print ignored files + for entry in statuses.iter().filter(|e| e.status() == git2::STATUS_IGNORED) { + if !header { + println!("\ +# Ignored files +# (use \"git add -f ...\" to include in what will be committed) +#"); + header = true; + } + let file = entry.index_to_workdir().unwrap().old_file().path().unwrap(); + println!("#\t{}", file.display()); + } + + if !changes_in_index && changed_in_workdir { + println!("no changes added to commit (use \"git add\" 
and/or \ + \"git commit -a\")"); + } +} + +// This version of the output prefixes each path with two status columns and +// shows submodule status information. +fn print_short(repo: &Repository, statuses: git2::Statuses) { + for entry in statuses.iter().filter(|e| e.status() != git2::STATUS_CURRENT) { + let mut istatus = match entry.status() { + s if s.contains(git2::STATUS_INDEX_NEW) => 'A', + s if s.contains(git2::STATUS_INDEX_MODIFIED) => 'M', + s if s.contains(git2::STATUS_INDEX_DELETED) => 'D', + s if s.contains(git2::STATUS_INDEX_RENAMED) => 'R', + s if s.contains(git2::STATUS_INDEX_TYPECHANGE) => 'T', + _ => ' ', + }; + let mut wstatus = match entry.status() { + s if s.contains(git2::STATUS_WT_NEW) => { + if istatus == ' ' { istatus = '?'; } '?' + } + s if s.contains(git2::STATUS_WT_MODIFIED) => 'M', + s if s.contains(git2::STATUS_WT_DELETED) => 'D', + s if s.contains(git2::STATUS_WT_RENAMED) => 'R', + s if s.contains(git2::STATUS_WT_TYPECHANGE) => 'T', + _ => ' ', + }; + + if entry.status().contains(git2::STATUS_IGNORED) { + istatus = '!'; + wstatus = '!'; + } + if istatus == '?' && wstatus == '?' { continue } + let mut extra = ""; + + // A commit in a tree is how submodules are stored, so let's go take a + // look at its status. 
+ // + // TODO: check for GIT_FILEMODE_COMMIT + let status = entry.index_to_workdir().and_then(|diff| { + let ignore = SubmoduleIgnore::Unspecified; + diff.new_file().path_bytes() + .and_then(|s| str::from_utf8(s).ok()) + .and_then(|name| repo.submodule_status(name, ignore).ok()) + }); + if let Some(status) = status { + if status.contains(git2::SUBMODULE_STATUS_WD_MODIFIED) { + extra = " (new commits)"; + } else if status.contains(git2::SUBMODULE_STATUS_WD_INDEX_MODIFIED) { + extra = " (modified content)"; + } else if status.contains(git2::SUBMODULE_STATUS_WD_WD_MODIFIED) { + extra = " (modified content)"; + } else if status.contains(git2::SUBMODULE_STATUS_WD_UNTRACKED) { + extra = " (untracked content)"; + } + } + + let (mut a, mut b, mut c) = (None, None, None); + if let Some(diff) = entry.head_to_index() { + a = diff.old_file().path(); + b = diff.new_file().path(); + } + if let Some(diff) = entry.index_to_workdir() { + a = a.or(diff.old_file().path()); + b = b.or(diff.old_file().path()); + c = diff.new_file().path(); + } + + match (istatus, wstatus) { + ('R', 'R') => println!("RR {} {} {}{}", a.unwrap().display(), + b.unwrap().display(), c.unwrap().display(), + extra), + ('R', w) => println!("R{} {} {}{}", w, a.unwrap().display(), + b.unwrap().display(), extra), + (i, 'R') => println!("{}R {} {}{}", i, a.unwrap().display(), + c.unwrap().display(), extra), + (i, w) => println!("{}{} {}{}", i, w, a.unwrap().display(), extra), + } + } + + for entry in statuses.iter().filter(|e| e.status() == git2::STATUS_WT_NEW) { + println!("?? {}", entry.index_to_workdir().unwrap().old_file() + .path().unwrap().display()); + } +} + +impl Args { + fn format(&self) -> Format { + if self.flag_short { Format::Short } + else if self.flag_long { Format::Long } + else if self.flag_porcelain { Format::Porcelain } + else if self.flag_z { Format::Porcelain } + else { Format::Long } + } +} + +fn main() { + const USAGE: &'static str = " +usage: status [options] [--] [..] 
+ +Options: + -s, --short show short statuses + --long show longer statuses (default) + --porcelain ?? + -b, --branch show branch information + -z ?? + --ignored show ignored files as well + --untracked-files setting for showing untracked files [no|normal|all] + --ignore-submodules setting for ignoring submodules [all] + --git-dir git directory to analyze + --repeat repeatedly show status, sleeping inbetween + --list-submodules show submodules + -h, --help show this message +"; + + let args = Docopt::new(USAGE).and_then(|d| d.decode()) + .unwrap_or_else(|e| e.exit()); + match run(&args) { + Ok(()) => {} + Err(e) => println!("error: {}", e), + } +} diff -Nru cargo-0.17.0/vendor/git2-0.6.4/examples/tag.rs cargo-0.19.0/vendor/git2-0.6.4/examples/tag.rs --- cargo-0.17.0/vendor/git2-0.6.4/examples/tag.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/git2-0.6.4/examples/tag.rs 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,133 @@ +/* + * libgit2 "tag" example - shows how to list, create and delete tags + * + * Written by the libgit2 contributors + * + * To the extent possible under law, the author(s) have dedicated all copyright + * and related and neighboring rights to this software to the public domain + * worldwide. This software is distributed without any warranty. + * + * You should have received a copy of the CC0 Public Domain Dedication along + * with this software. If not, see + * . 
+ */ + +#![deny(warnings)] + +extern crate git2; +extern crate docopt; +extern crate rustc_serialize; + +use std::str; +use docopt::Docopt; +use git2::{Repository, Error, Tag, Commit}; + +#[derive(RustcDecodable)] +struct Args { + arg_tagname: Option, + arg_object: Option, + arg_pattern: Option, + flag_n: Option, + flag_force: bool, + flag_list: bool, + flag_delete: Option, + flag_message: Option, +} + +fn run(args: &Args) -> Result<(), Error> { + let repo = try!(Repository::open(".")); + + if let Some(ref name) = args.arg_tagname { + let target = args.arg_object.as_ref().map(|s| &s[..]).unwrap_or("HEAD"); + let obj = try!(repo.revparse_single(target)); + + if let Some(ref message) = args.flag_message { + let sig = try!(repo.signature()); + try!(repo.tag(&name, &obj, &sig, &message, args.flag_force)); + } else { + try!(repo.tag_lightweight(&name, &obj, args.flag_force)); + } + + } else if let Some(ref name) = args.flag_delete { + let obj = try!(repo.revparse_single(name)); + let id = try!(obj.short_id()); + try!(repo.tag_delete(name)); + println!("Deleted tag '{}' (was {})", name, + str::from_utf8(&*id).unwrap()); + + } else if args.flag_list { + let pattern = args.arg_pattern.as_ref().map(|s| &s[..]).unwrap_or("*"); + for name in try!(repo.tag_names(Some(pattern))).iter() { + let name = name.unwrap(); + let obj = try!(repo.revparse_single(name)); + + if let Some(tag) = obj.as_tag() { + print_tag(tag, args); + } else if let Some(commit) = obj.as_commit() { + print_commit(commit, name, args); + } else { + print_name(name); + } + } + } + Ok(()) +} + +fn print_tag(tag: &Tag, args: &Args) { + print!("{:<16}", tag.name().unwrap()); + if args.flag_n.is_some() { + print_list_lines(tag.message(), args); + } else { + println!(""); + } +} + +fn print_commit(commit: &Commit, name: &str, args: &Args) { + print!("{:<16}", name); + if args.flag_n.is_some() { + print_list_lines(commit.message(), args); + } else { + println!(""); + } +} + +fn print_name(name: &str) { + 
println!("{}", name); +} + +fn print_list_lines(message: Option<&str>, args: &Args) { + let message = match message { Some(s) => s, None => return }; + let mut lines = message.lines().filter(|l| !l.trim().is_empty()); + if let Some(first) = lines.next() { + print!("{}", first); + } + println!(""); + + for line in lines.take(args.flag_n.unwrap_or(0) as usize) { + print!(" {}", line); + } +} + +fn main() { + const USAGE: &'static str = " +usage: + tag [-a] [-f] [-m ] [] + tag -d + tag [-n ] -l [] + +Options: + -n specify number of lines from teh annotation to print + -f, --force replace an existing tag with the given name + -l, --list list tags with names matching the pattern given + -d, --delete delete the tag specified + -m, --message message for a new tag + -h, --help show this message +"; + + let args = Docopt::new(USAGE).and_then(|d| d.decode()) + .unwrap_or_else(|e| e.exit()); + match run(&args) { + Ok(()) => {} + Err(e) => println!("error: {}", e), + } +} diff -Nru cargo-0.17.0/vendor/git2-0.6.4/.gitignore cargo-0.19.0/vendor/git2-0.6.4/.gitignore --- cargo-0.17.0/vendor/git2-0.6.4/.gitignore 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/git2-0.6.4/.gitignore 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,3 @@ +target +Cargo.lock +src/main.rs diff -Nru cargo-0.17.0/vendor/git2-0.6.4/.gitmodules cargo-0.19.0/vendor/git2-0.6.4/.gitmodules --- cargo-0.17.0/vendor/git2-0.6.4/.gitmodules 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/git2-0.6.4/.gitmodules 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,3 @@ +[submodule "libgit2-sys/libgit2"] + path = libgit2-sys/libgit2 + url = https://github.com/libgit2/libgit2 diff -Nru cargo-0.17.0/vendor/git2-0.6.4/LICENSE-APACHE cargo-0.19.0/vendor/git2-0.6.4/LICENSE-APACHE --- cargo-0.17.0/vendor/git2-0.6.4/LICENSE-APACHE 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/git2-0.6.4/LICENSE-APACHE 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 
2004 + http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + +4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the 
following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + +Copyright [yyyy] [name of copyright owner] + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
diff -Nru cargo-0.17.0/vendor/git2-0.6.4/LICENSE-MIT cargo-0.19.0/vendor/git2-0.6.4/LICENSE-MIT --- cargo-0.17.0/vendor/git2-0.6.4/LICENSE-MIT 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/git2-0.6.4/LICENSE-MIT 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,25 @@ +Copyright (c) 2014 Alex Crichton + +Permission is hereby granted, free of charge, to any +person obtaining a copy of this software and associated +documentation files (the "Software"), to deal in the +Software without restriction, including without +limitation the rights to use, copy, modify, merge, +publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software +is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice +shall be included in all copies or substantial portions +of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF +ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED +TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A +PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR +IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. 
diff -Nru cargo-0.17.0/vendor/git2-0.6.4/README.md cargo-0.19.0/vendor/git2-0.6.4/README.md --- cargo-0.17.0/vendor/git2-0.6.4/README.md 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/git2-0.6.4/README.md 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,52 @@ +# git2-rs + +[![Build Status](https://travis-ci.org/alexcrichton/git2-rs.svg?branch=master)](https://travis-ci.org/alexcrichton/git2-rs) +[![Build Status](https://ci.appveyor.com/api/projects/status/6vem3xgno2kuxnfm?svg=true)](https://ci.appveyor.com/project/alexcrichton/git2-rs) + +[Documentation](https://docs.rs/git2) + +libgit2 bindings for Rust + +```toml +[dependencies] +git2 = "0.6" +``` + +## Building git2-rs + +First, you'll need to install _CMake_. Afterwards, just run: + +```sh +$ git clone https://github.com/alexcrichton/git2-rs +$ cd git2-rs +$ cargo build +``` + +## Building on OSX 10.10+ + +Currently libssh2 requires linking against OpenSSL, and to compile libssh2 it +also needs to find the OpenSSL headers. On OSX 10.10+ the OpenSSL headers have +been removed, but if you're using Homebrew you can install them via: + +```sh +brew install openssl +``` + +To get this library to pick them up the [standard `rust-openssl` +instructions][instr] can be used to transitively inform libssh2-sys about where +the header files are: + +[instr]: https://github.com/sfackler/rust-openssl#osx + +```sh +export OPENSSL_INCLUDE_DIR=`brew --prefix openssl`/include +export OPENSSL_LIB_DIR=`brew --prefix openssl`/lib +``` + +# License + +`git2-rs` is primarily distributed under the terms of both the MIT license and +the Apache License (Version 2.0), with portions covered by various BSD-like +licenses. + +See LICENSE-APACHE, and LICENSE-MIT for details. 
diff -Nru cargo-0.17.0/vendor/git2-0.6.4/src/blame.rs cargo-0.19.0/vendor/git2-0.6.4/src/blame.rs --- cargo-0.17.0/vendor/git2-0.6.4/src/blame.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/git2-0.6.4/src/blame.rs 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,303 @@ +use std::marker; +use {raw, Repository, Oid, signature, Signature}; +use util::{self, Binding}; +use std::path::Path; +use std::ops::Range; +use std::mem; + +/// Opaque structure to hold blame results. +pub struct Blame<'repo> { + raw: *mut raw::git_blame, + _marker: marker::PhantomData<&'repo Repository>, +} + +/// Structure that represents a blame hunk. +pub struct BlameHunk<'blame> { + raw: *mut raw::git_blame_hunk, + _marker: marker::PhantomData<&'blame raw::git_blame>, +} + +/// Blame options +pub struct BlameOptions { + raw: raw::git_blame_options, +} + +/// An iterator over the hunks in a blame. +pub struct BlameIter<'blame> { + range: Range, + blame: &'blame Blame<'blame>, +} + +impl<'repo> Blame<'repo> { + + /// Gets the number of hunks that exist in the blame structure. + pub fn len(&self) -> usize { + unsafe { raw::git_blame_get_hunk_count(self.raw) as usize } + } + + /// Gets the blame hunk at the given index. + pub fn get_index(&self, index: usize) -> Option { + unsafe { + let ptr = raw::git_blame_get_hunk_byindex(self.raw(), index as u32); + if ptr.is_null() { + None + } else { + Some(BlameHunk::from_raw_const(ptr)) + } + } + } + + /// Gets the hunk that relates to the given line number in the newest + /// commit. + pub fn get_line(&self, lineno: usize) -> Option { + unsafe { + let ptr = raw::git_blame_get_hunk_byline(self.raw(), lineno); + if ptr.is_null() { + None + } else { + Some(BlameHunk::from_raw_const(ptr)) + } + } + } + + /// Returns an iterator over the hunks in this blame. 
+ pub fn iter(&self) -> BlameIter { + BlameIter { range: 0..self.len(), blame: self } + } + +} + +impl<'blame> BlameHunk<'blame> { + + unsafe fn from_raw_const(raw: *const raw::git_blame_hunk) + -> BlameHunk<'blame> { + BlameHunk { + raw: raw as *mut raw::git_blame_hunk, + _marker: marker::PhantomData, + } + } + + /// Returns OID of the commit where this line was last changed + pub fn final_commit_id(&self) -> Oid { + unsafe { Oid::from_raw(&(*self.raw).final_commit_id) } + } + + /// Returns signature of the commit. + pub fn final_signature(&self) -> Signature { + unsafe { signature::from_raw_const(self, (*self.raw).final_signature) } + } + + /// Returns line number where this hunk begins. + /// + /// Note that the start line is counting from 1. + pub fn final_start_line(&self) -> usize { + unsafe { (*self.raw).final_start_line_number } + } + + /// Returns the OID of the commit where this hunk was found. + /// + /// This will usually be the same as `final_commit_id`, + /// except when `BlameOptions::track_copies_any_commit_copies` has been + /// turned on + pub fn orig_commit_id(&self) -> Oid { + unsafe { Oid::from_raw(&(*self.raw).orig_commit_id) } + } + + /// Returns signature of the commit. + pub fn orig_signature(&self) -> Signature { + unsafe { signature::from_raw_const(self, (*self.raw).orig_signature) } + } + + /// Returns line number where this hunk begins. + /// + /// Note that the start line is counting from 1. + pub fn orig_start_line(&self) -> usize { + unsafe { (*self.raw).orig_start_line_number} + } + + /// Returns path to the file where this hunk originated. + /// + /// Note: `None` could be returned for non-unicode paths on Widnows. 
+ pub fn path(&self) -> Option<&Path> { + unsafe { + if let Some(bytes) = ::opt_bytes(self, (*self.raw).orig_path) { + Some(util::bytes2path(bytes)) + } else { + None + } + } + } + + /// Tests whether this hunk has been tracked to a boundary commit + /// (the root, or the commit specified in git_blame_options.oldest_commit). + pub fn is_boundary(&self) -> bool { + unsafe { (*self.raw).boundary == 1 } + } + + /// Returns number of lines in this hunk. + pub fn lines_in_hunk(&self) -> usize { + unsafe { (*self.raw).lines_in_hunk as usize } + } +} + +impl BlameOptions { + + /// Initialize options + pub fn new() -> BlameOptions { + unsafe { + let mut raw: raw::git_blame_options = mem::zeroed(); + assert_eq!( + raw::git_blame_init_options(&mut raw, + raw::GIT_BLAME_OPTIONS_VERSION) + , 0); + + Binding::from_raw(&raw as *const _ as *mut _) + } + } + + fn flag(&mut self, opt: u32, val: bool) -> &mut BlameOptions { + if val { + self.raw.flags |= opt; + } else { + self.raw.flags &= !opt; + } + self + } + + /// Track lines that have moved within a file. + pub fn track_copies_same_file(&mut self, opt: bool) -> &mut BlameOptions { + self.flag(raw::GIT_BLAME_TRACK_COPIES_SAME_FILE, opt) + } + + /// Track lines that have moved across files in the same commit. + pub fn track_copies_same_commit_moves(&mut self, opt: bool) -> &mut BlameOptions { + self.flag(raw::GIT_BLAME_TRACK_COPIES_SAME_COMMIT_MOVES, opt) + } + + /// Track lines that have been copied from another file that exists + /// in the same commit. + pub fn track_copies_same_commit_copies(&mut self, opt: bool) -> &mut BlameOptions { + self.flag(raw::GIT_BLAME_TRACK_COPIES_SAME_COMMIT_COPIES, opt) + } + + /// Track lines that have been copied from another file that exists + /// in any commit. 
+ pub fn track_copies_any_commit_copies(&mut self, opt: bool) -> &mut BlameOptions { + self.flag(raw::GIT_BLAME_TRACK_COPIES_ANY_COMMIT_COPIES, opt) + } + + /// Restrict the search of commits to those reachable following only + /// the first parents. + pub fn first_parent(&mut self, opt: bool) -> &mut BlameOptions { + self.flag(raw::GIT_BLAME_FIRST_PARENT, opt) + } + + /// Setter for the id of the newest commit to consider. + pub fn newest_commit(&mut self, id: Oid) -> &mut BlameOptions { + unsafe { self.raw.newest_commit = *id.raw(); } + self + } + + /// Setter for the id of the oldest commit to consider. + pub fn oldest_commit(&mut self, id: Oid) -> &mut BlameOptions { + unsafe { self.raw.oldest_commit = *id.raw(); } + self + } + +} + +impl<'repo> Binding for Blame<'repo> { + type Raw = *mut raw::git_blame; + + unsafe fn from_raw(raw: *mut raw::git_blame) -> Blame<'repo> { + Blame { raw: raw, _marker: marker::PhantomData } + } + + fn raw(&self) -> *mut raw::git_blame { self.raw } +} + +impl<'repo> Drop for Blame<'repo> { + fn drop(&mut self) { + unsafe { raw::git_blame_free(self.raw) } + } +} + +impl<'blame> Binding for BlameHunk<'blame> { + type Raw = *mut raw::git_blame_hunk; + + unsafe fn from_raw(raw: *mut raw::git_blame_hunk) -> BlameHunk<'blame> { + BlameHunk { raw: raw, _marker: marker::PhantomData } + } + + fn raw(&self) -> *mut raw::git_blame_hunk { self.raw } +} + +impl Binding for BlameOptions { + type Raw = *mut raw::git_blame_options; + + unsafe fn from_raw(opts: *mut raw::git_blame_options) -> BlameOptions { + BlameOptions { raw: *opts } + } + + fn raw(&self) -> *mut raw::git_blame_options { + &self.raw as *const _ as *mut _ + } +} + +impl<'blame> Iterator for BlameIter<'blame> { + type Item = BlameHunk<'blame>; + fn next(&mut self) -> Option> { + self.range.next().and_then(|i| self.blame.get_index(i)) + } + + fn size_hint(&self) -> (usize, Option) { self.range.size_hint() } +} + +impl<'blame> DoubleEndedIterator for BlameIter<'blame> { + fn 
next_back(&mut self) -> Option> { + self.range.next_back().and_then(|i| self.blame.get_index(i)) + } +} + +impl<'blame> ExactSizeIterator for BlameIter<'blame> {} + +#[cfg(test)] +mod tests { + use std::fs::{self, File}; + use std::path::Path; + + #[test] + fn smoke() { + let (_td, repo) = ::test::repo_init(); + let mut index = repo.index().unwrap(); + + let root = repo.path().parent().unwrap(); + fs::create_dir(&root.join("foo")).unwrap(); + File::create(&root.join("foo/bar")).unwrap(); + index.add_path(Path::new("foo/bar")).unwrap(); + + let id = index.write_tree().unwrap(); + let tree = repo.find_tree(id).unwrap(); + let sig = repo.signature().unwrap(); + let id = repo.refname_to_id("HEAD").unwrap(); + let parent = repo.find_commit(id).unwrap(); + let commit = repo.commit(Some("HEAD"), &sig, &sig, "commit", + &tree, &[&parent]).unwrap(); + + let blame = repo.blame_file(Path::new("foo/bar"), None).unwrap(); + + assert_eq!(blame.len(), 1); + assert_eq!(blame.iter().count(), 1); + + let hunk = blame.get_index(0).unwrap(); + assert_eq!(hunk.final_commit_id(), commit); + assert_eq!(hunk.final_signature().name(), sig.name()); + assert_eq!(hunk.final_signature().email(), sig.email()); + assert_eq!(hunk.final_start_line(), 1); + assert_eq!(hunk.path(), Some(Path::new("foo/bar"))); + assert_eq!(hunk.lines_in_hunk(), 0); + assert!(!hunk.is_boundary()) + } + +} + diff -Nru cargo-0.17.0/vendor/git2-0.6.4/src/blob.rs cargo-0.19.0/vendor/git2-0.6.4/src/blob.rs --- cargo-0.17.0/vendor/git2-0.6.4/src/blob.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/git2-0.6.4/src/blob.rs 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,104 @@ +use std::marker; +use std::mem; +use std::slice; + +use {raw, Oid, Object}; +use util::Binding; + +/// A structure to represent a git [blob][1] +/// +/// [1]: http://git-scm.com/book/en/Git-Internals-Git-Objects +pub struct Blob<'repo> { + raw: *mut raw::git_blob, + _marker: marker::PhantomData>, +} + +impl<'repo> Blob<'repo> { + /// Get 
the id (SHA1) of a repository blob + pub fn id(&self) -> Oid { + unsafe { Binding::from_raw(raw::git_blob_id(&*self.raw)) } + } + + /// Determine if the blob content is most certainly binary or not. + pub fn is_binary(&self) -> bool { + unsafe { raw::git_blob_is_binary(&*self.raw) == 1 } + } + + /// Get the content of this blob. + pub fn content(&self) -> &[u8] { + unsafe { + let data = raw::git_blob_rawcontent(&*self.raw) as *const u8; + let len = raw::git_blob_rawsize(&*self.raw) as usize; + slice::from_raw_parts(data, len) + } + } + + /// Casts this Blob to be usable as an `Object` + pub fn as_object(&self) -> &Object<'repo> { + unsafe { + &*(self as *const _ as *const Object<'repo>) + } + } + + /// Consumes Blob to be returned as an `Object` + pub fn into_object(self) -> Object<'repo> { + assert_eq!(mem::size_of_val(&self), mem::size_of::()); + unsafe { + mem::transmute(self) + } + } +} + +impl<'repo> Binding for Blob<'repo> { + type Raw = *mut raw::git_blob; + + unsafe fn from_raw(raw: *mut raw::git_blob) -> Blob<'repo> { + Blob { + raw: raw, + _marker: marker::PhantomData, + } + } + fn raw(&self) -> *mut raw::git_blob { self.raw } +} + + +impl<'repo> Drop for Blob<'repo> { + fn drop(&mut self) { + unsafe { raw::git_blob_free(self.raw) } + } +} + +#[cfg(test)] +mod tests { + use std::io::prelude::*; + use std::fs::File; + use tempdir::TempDir; + use Repository; + + #[test] + fn buffer() { + let td = TempDir::new("test").unwrap(); + let repo = Repository::init(td.path()).unwrap(); + let id = repo.blob(&[5, 4, 6]).unwrap(); + let blob = repo.find_blob(id).unwrap(); + + assert_eq!(blob.id(), id); + assert_eq!(blob.content(), [5, 4, 6]); + assert!(blob.is_binary()); + + repo.find_object(id, None).unwrap().as_blob().unwrap(); + repo.find_object(id, None).unwrap().into_blob().ok().unwrap(); + } + + #[test] + fn path() { + let td = TempDir::new("test").unwrap(); + let path = td.path().join("foo"); + File::create(&path).unwrap().write_all(&[7, 8, 9]).unwrap(); + let 
repo = Repository::init(td.path()).unwrap(); + let id = repo.blob_path(&path).unwrap(); + let blob = repo.find_blob(id).unwrap(); + assert_eq!(blob.content(), [7, 8, 9]); + blob.into_object(); + } +} diff -Nru cargo-0.17.0/vendor/git2-0.6.4/src/branch.rs cargo-0.19.0/vendor/git2-0.6.4/src/branch.rs --- cargo-0.17.0/vendor/git2-0.6.4/src/branch.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/git2-0.6.4/src/branch.rs 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,162 @@ +use std::ffi::CString; +use std::marker; +use std::str; +use libc; + +use {raw, Error, Reference, BranchType, References}; +use util::Binding; + +/// A structure to represent a git [branch][1] +/// +/// A branch is currently just a wrapper to an underlying `Reference`. The +/// reference can be accessed through the `get` and `unwrap` methods. +/// +/// [1]: http://git-scm.com/book/en/Git-Branching-What-a-Branch-Is +pub struct Branch<'repo> { + inner: Reference<'repo>, +} + +/// An iterator over the branches inside of a repository. +pub struct Branches<'repo> { + raw: *mut raw::git_branch_iterator, + _marker: marker::PhantomData>, +} + +impl<'repo> Branch<'repo> { + /// Creates a new branch from a reference + pub fn wrap(reference: Reference) -> Branch { Branch { inner: reference } } + + /// Gain access to the reference that is this branch + pub fn get(&self) -> &Reference<'repo> { &self.inner } + + /// Take ownership of the underlying reference. + pub fn into_reference(self) -> Reference<'repo> { self.inner } + + /// Delete an existing branch reference. + pub fn delete(&mut self) -> Result<(), Error> { + unsafe { try_call!(raw::git_branch_delete(self.get().raw())); } + Ok(()) + } + + /// Determine if the current local branch is pointed at by HEAD. + pub fn is_head(&self) -> bool { + unsafe { raw::git_branch_is_head(&*self.get().raw()) == 1 } + } + + /// Move/rename an existing local branch reference. 
+ pub fn rename(&mut self, new_branch_name: &str, force: bool) + -> Result, Error> { + let mut ret = 0 as *mut raw::git_reference; + let new_branch_name = try!(CString::new(new_branch_name)); + unsafe { + try_call!(raw::git_branch_move(&mut ret, self.get().raw(), + new_branch_name, force)); + Ok(Branch::wrap(Binding::from_raw(ret))) + } + } + + /// Return the name of the given local or remote branch. + /// + /// May return `Ok(None)` if the name is not valid utf-8. + pub fn name(&self) -> Result, Error> { + self.name_bytes().map(|s| str::from_utf8(s).ok()) + } + + /// Return the name of the given local or remote branch. + pub fn name_bytes(&self) -> Result<&[u8], Error> { + let mut ret = 0 as *const libc::c_char; + unsafe { + try_call!(raw::git_branch_name(&mut ret, &*self.get().raw())); + Ok(::opt_bytes(self, ret).unwrap()) + } + } + + /// Return the reference supporting the remote tracking branch, given a + /// local branch reference. + pub fn upstream<'a>(&'a self) -> Result, Error> { + let mut ret = 0 as *mut raw::git_reference; + unsafe { + try_call!(raw::git_branch_upstream(&mut ret, &*self.get().raw())); + Ok(Branch::wrap(Binding::from_raw(ret))) + } + } + + /// Set the upstream configuration for a given local branch. + /// + /// If `None` is specified, then the upstream branch is unset. The name + /// provided is the name of the branch to set as upstream. + pub fn set_upstream(&mut self, + upstream_name: Option<&str>) -> Result<(), Error> { + let upstream_name = try!(::opt_cstr(upstream_name)); + unsafe { + try_call!(raw::git_branch_set_upstream(self.get().raw(), + upstream_name)); + Ok(()) + } + } +} + +impl<'repo> Branches<'repo> { + /// Creates a new iterator from the raw pointer given. + /// + /// This function is unsafe as it is not guaranteed that `raw` is a valid + /// pointer. 
+ pub unsafe fn from_raw(raw: *mut raw::git_branch_iterator) + -> Branches<'repo> { + Branches { + raw: raw, + _marker: marker::PhantomData, + } + } +} + +impl<'repo> Iterator for Branches<'repo> { + type Item = Result<(Branch<'repo>, BranchType), Error>; + fn next(&mut self) -> Option, BranchType), Error>> { + let mut ret = 0 as *mut raw::git_reference; + let mut typ = raw::GIT_BRANCH_LOCAL; + unsafe { + try_call_iter!(raw::git_branch_next(&mut ret, &mut typ, self.raw)); + let typ = match typ { + raw::GIT_BRANCH_LOCAL => BranchType::Local, + raw::GIT_BRANCH_REMOTE => BranchType::Remote, + n => panic!("unexected branch type: {}", n), + }; + Some(Ok((Branch::wrap(Binding::from_raw(ret)), typ))) + } + } +} + +impl<'repo> Drop for Branches<'repo> { + fn drop(&mut self) { + unsafe { raw::git_branch_iterator_free(self.raw) } + } +} + +#[cfg(test)] +mod tests { + use BranchType; + + #[test] + fn smoke() { + let (_td, repo) = ::test::repo_init(); + let head = repo.head().unwrap(); + let target = head.target().unwrap(); + let commit = repo.find_commit(target).unwrap(); + + let mut b1 = repo.branch("foo", &commit, false).unwrap(); + assert!(!b1.is_head()); + repo.branch("foo2", &commit, false).unwrap(); + + assert_eq!(repo.branches(None).unwrap().count(), 3); + repo.find_branch("foo", BranchType::Local).unwrap(); + let mut b1 = b1.rename("bar", false).unwrap(); + assert_eq!(b1.name().unwrap(), Some("bar")); + assert!(b1.upstream().is_err()); + b1.set_upstream(Some("master")).unwrap(); + b1.upstream().unwrap(); + b1.set_upstream(None).unwrap(); + + b1.delete().unwrap(); + } +} diff -Nru cargo-0.17.0/vendor/git2-0.6.4/src/buf.rs cargo-0.19.0/vendor/git2-0.6.4/src/buf.rs --- cargo-0.17.0/vendor/git2-0.6.4/src/buf.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/git2-0.6.4/src/buf.rs 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,67 @@ +use std::slice; +use std::str; +use std::ops::{Deref, DerefMut}; +use libc; + +use raw; +use util::Binding; + +/// A structure to 
wrap an intermediate buffer used by libgit2. +/// +/// A buffer can be thought of a `Vec`, but the `Vec` type is not used to +/// avoid copying data back and forth. +pub struct Buf { + raw: raw::git_buf, +} + +impl Buf { + /// Creates a new empty buffer. + pub fn new() -> Buf { + ::init(); + unsafe { + Binding::from_raw(&mut raw::git_buf { + ptr: 0 as *mut libc::c_char, + size: 0, + asize: 0, + } as *mut _) + } + } + + /// Attempt to view this buffer as a string slice. + /// + /// Returns `None` if the buffer is not valid utf-8. + pub fn as_str(&self) -> Option<&str> { str::from_utf8(&**self).ok() } +} + +impl Deref for Buf { + type Target = [u8]; + fn deref(&self) -> &[u8] { + unsafe { + slice::from_raw_parts(self.raw.ptr as *const u8, + self.raw.size as usize) + } + } +} + +impl DerefMut for Buf { + fn deref_mut(&mut self) -> &mut [u8] { + unsafe { + slice::from_raw_parts_mut(self.raw.ptr as *mut u8, + self.raw.size as usize) + } + } +} + +impl Binding for Buf { + type Raw = *mut raw::git_buf; + unsafe fn from_raw(raw: *mut raw::git_buf) -> Buf { + Buf { raw: *raw } + } + fn raw(&self) -> *mut raw::git_buf { &self.raw as *const _ as *mut _ } +} + +impl Drop for Buf { + fn drop(&mut self) { + unsafe { raw::git_buf_free(&mut self.raw) } + } +} diff -Nru cargo-0.17.0/vendor/git2-0.6.4/src/build.rs cargo-0.19.0/vendor/git2-0.6.4/src/build.rs --- cargo-0.17.0/vendor/git2-0.6.4/src/build.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/git2-0.6.4/src/build.rs 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,548 @@ +//! Builder-pattern objects for configuration various git operations. + +use std::ffi::{CStr, CString}; +use std::mem; +use std::path::Path; +use libc::{c_char, size_t, c_void, c_uint, c_int}; + +use {raw, panic, Error, Repository, FetchOptions, IntoCString}; +use {CheckoutNotificationType, DiffFile}; +use util::{self, Binding}; + +/// A builder struct which is used to build configuration for cloning a new git +/// repository. 
+pub struct RepoBuilder<'cb> { + bare: bool, + branch: Option, + local: bool, + hardlinks: bool, + checkout: Option>, + fetch_opts: Option>, +} + +/// A builder struct for configuring checkouts of a repository. +pub struct CheckoutBuilder<'cb> { + their_label: Option, + our_label: Option, + ancestor_label: Option, + target_dir: Option, + paths: Vec, + path_ptrs: Vec<*const c_char>, + file_perm: Option, + dir_perm: Option, + disable_filters: bool, + checkout_opts: u32, + progress: Option>>, + notify: Option>>, + notify_flags: CheckoutNotificationType, +} + +/// Checkout progress notification callback. +/// +/// The first argument is the path for the notification, the next is the numver +/// of completed steps so far, and the final is the total number of steps. +pub type Progress<'a> = FnMut(Option<&Path>, usize, usize) + 'a; + +/// Checkout notifications callback. +/// +/// The first argument is the notification type, the next is the path for the +/// the notification, followed by the baseline diff, target diff, and workdir diff. +/// +/// The callback must return a bool specifying whether the checkout should +/// continue. +pub type Notify<'a> = FnMut(CheckoutNotificationType, Option<&Path>, DiffFile, + DiffFile, DiffFile) -> bool + 'a; + +impl<'cb> RepoBuilder<'cb> { + /// Creates a new repository builder with all of the default configuration. + /// + /// When ready, the `clone()` method can be used to clone a new repository + /// using this configuration. + pub fn new() -> RepoBuilder<'cb> { + ::init(); + RepoBuilder { + bare: false, + branch: None, + local: true, + hardlinks: true, + checkout: None, + fetch_opts: None, + } + } + + /// Indicate whether the repository will be cloned as a bare repository or + /// not. + pub fn bare(&mut self, bare: bool) -> &mut RepoBuilder<'cb> { + self.bare = bare; + self + } + + /// Specify the name of the branch to check out after the clone. + /// + /// If not specified, the remote's default branch will be used. 
+ pub fn branch(&mut self, branch: &str) -> &mut RepoBuilder<'cb> { + self.branch = Some(CString::new(branch).unwrap()); + self + } + + /// Set the flag for bypassing the git aware transport mechanism for local + /// paths. + /// + /// If `true`, the git-aware transport will be bypassed for local paths. If + /// `false`, the git-aware transport will not be bypassed. + pub fn local(&mut self, local: bool) -> &mut RepoBuilder<'cb> { + self.local = local; + self + } + + /// Set the flag for whether hardlinks are used when using a local git-aware + /// transport mechanism. + pub fn hardlinks(&mut self, links: bool) -> &mut RepoBuilder<'cb> { + self.hardlinks = links; + self + } + + /// Configure the checkout which will be performed by consuming a checkout + /// builder. + pub fn with_checkout(&mut self, checkout: CheckoutBuilder<'cb>) + -> &mut RepoBuilder<'cb> { + self.checkout = Some(checkout); + self + } + + /// Options which control the fetch, including callbacks. + /// + /// The callbacks are used for reporting fetch progress, and for acquiring + /// credentials in the event they are needed. + pub fn fetch_options(&mut self, fetch_opts: FetchOptions<'cb>) + -> &mut RepoBuilder<'cb> { + self.fetch_opts = Some(fetch_opts); + self + } + + /// Clone a remote repository. + /// + /// This will use the options configured so far to clone the specified url + /// into the specified local path. 
+ pub fn clone(&mut self, url: &str, into: &Path) -> Result { + let mut opts: raw::git_clone_options = unsafe { mem::zeroed() }; + unsafe { + try_call!(raw::git_clone_init_options(&mut opts, + raw::GIT_CLONE_OPTIONS_VERSION)); + } + opts.bare = self.bare as c_int; + opts.checkout_branch = self.branch.as_ref().map(|s| { + s.as_ptr() + }).unwrap_or(0 as *const _); + + opts.local = match (self.local, self.hardlinks) { + (true, false) => raw::GIT_CLONE_LOCAL_NO_LINKS, + (false, _) => raw::GIT_CLONE_NO_LOCAL, + (true, _) => raw::GIT_CLONE_LOCAL_AUTO, + }; + opts.checkout_opts.checkout_strategy = + raw::GIT_CHECKOUT_SAFE as c_uint; + + match self.fetch_opts { + Some(ref mut cbs) => { + opts.fetch_opts = cbs.raw(); + }, + None => {} + } + + match self.checkout { + Some(ref mut c) => unsafe { c.configure(&mut opts.checkout_opts) }, + None => {} + } + + let url = try!(CString::new(url)); + let into = try!(into.into_c_string()); + let mut raw = 0 as *mut raw::git_repository; + unsafe { + try_call!(raw::git_clone(&mut raw, url, into, &opts)); + Ok(Binding::from_raw(raw)) + } + } +} + +impl<'cb> CheckoutBuilder<'cb> { + /// Creates a new builder for checkouts with all of its default + /// configuration. + pub fn new() -> CheckoutBuilder<'cb> { + ::init(); + CheckoutBuilder { + disable_filters: false, + dir_perm: None, + file_perm: None, + path_ptrs: Vec::new(), + paths: Vec::new(), + target_dir: None, + ancestor_label: None, + our_label: None, + their_label: None, + checkout_opts: raw::GIT_CHECKOUT_SAFE as u32, + progress: None, + notify: None, + notify_flags: CheckoutNotificationType::empty(), + } + } + + /// Indicate that this checkout should perform a dry run by checking for + /// conflicts but not make any actual changes. 
+ pub fn dry_run(&mut self) -> &mut CheckoutBuilder<'cb> { + self.checkout_opts &= !((1 << 4) - 1); + self.checkout_opts |= raw::GIT_CHECKOUT_NONE as u32; + self + } + + /// Take any action necessary to get the working directory to match the + /// target including potentially discarding modified files. + pub fn force(&mut self) -> &mut CheckoutBuilder<'cb> { + self.checkout_opts &= !((1 << 4) - 1); + self.checkout_opts |= raw::GIT_CHECKOUT_FORCE as u32; + self + } + + /// Indicate that the checkout should be performed safely, allowing new + /// files to be created but not overwriting extisting files or changes. + /// + /// This is the default. + pub fn safe(&mut self) -> &mut CheckoutBuilder<'cb> { + self.checkout_opts &= !((1 << 4) - 1); + self.checkout_opts |= raw::GIT_CHECKOUT_SAFE as u32; + self + } + + fn flag(&mut self, bit: raw::git_checkout_strategy_t, + on: bool) -> &mut CheckoutBuilder<'cb> { + if on { + self.checkout_opts |= bit as u32; + } else { + self.checkout_opts &= !(bit as u32); + } + self + } + + /// In safe mode, create files that don't exist. + /// + /// Defaults to false. + pub fn recreate_missing(&mut self, allow: bool) -> &mut CheckoutBuilder<'cb> { + self.flag(raw::GIT_CHECKOUT_RECREATE_MISSING, allow) + } + + /// In safe mode, apply safe file updates even when there are conflicts + /// instead of canceling the checkout. + /// + /// Defaults to false. + pub fn allow_conflicts(&mut self, allow: bool) -> &mut CheckoutBuilder<'cb> { + self.flag(raw::GIT_CHECKOUT_ALLOW_CONFLICTS, allow) + } + + /// Remove untracked files from the working dir. + /// + /// Defaults to false. + pub fn remove_untracked(&mut self, remove: bool) + -> &mut CheckoutBuilder<'cb> { + self.flag(raw::GIT_CHECKOUT_REMOVE_UNTRACKED, remove) + } + + /// Remove ignored files from the working dir. + /// + /// Defaults to false. 
+ pub fn remove_ignored(&mut self, remove: bool) -> &mut CheckoutBuilder<'cb> { + self.flag(raw::GIT_CHECKOUT_REMOVE_IGNORED, remove) + } + + /// Only update the contents of files that already exist. + /// + /// If set, files will not be created or deleted. + /// + /// Defaults to false. + pub fn update_only(&mut self, update: bool) -> &mut CheckoutBuilder<'cb> { + self.flag(raw::GIT_CHECKOUT_UPDATE_ONLY, update) + } + + /// Prevents checkout from writing the updated files' information to the + /// index. + /// + /// Defaults to true. + pub fn update_index(&mut self, update: bool) -> &mut CheckoutBuilder<'cb> { + self.flag(raw::GIT_CHECKOUT_DONT_UPDATE_INDEX, !update) + } + + /// Indicate whether the index and git attributes should be refreshed from + /// disk before any operations. + /// + /// Defaults to true, + pub fn refresh(&mut self, refresh: bool) -> &mut CheckoutBuilder<'cb> { + self.flag(raw::GIT_CHECKOUT_NO_REFRESH, !refresh) + } + + /// Skip files with unmerged index entries. + /// + /// Defaults to false. + pub fn skip_unmerged(&mut self, skip: bool) -> &mut CheckoutBuilder<'cb> { + self.flag(raw::GIT_CHECKOUT_SKIP_UNMERGED, skip) + } + + /// Indicate whether the checkout should proceed on conflicts by using the + /// stage 2 version of the file ("ours"). + /// + /// Defaults to false. + pub fn use_ours(&mut self, ours: bool) -> &mut CheckoutBuilder<'cb> { + self.flag(raw::GIT_CHECKOUT_USE_OURS, ours) + } + + /// Indicate whether the checkout should proceed on conflicts by using the + /// stage 3 version of the file ("theirs"). + /// + /// Defaults to false. + pub fn use_theirs(&mut self, theirs: bool) -> &mut CheckoutBuilder<'cb> { + self.flag(raw::GIT_CHECKOUT_USE_THEIRS, theirs) + } + + /// Indicate whether ignored files should be overwritten during the checkout. + /// + /// Defaults to true. 
+ pub fn overwrite_ignored(&mut self, overwrite: bool) + -> &mut CheckoutBuilder<'cb> { + self.flag(raw::GIT_CHECKOUT_DONT_OVERWRITE_IGNORED, !overwrite) + } + + /// Indicate whether a normal merge file should be written for conflicts. + /// + /// Defaults to false. + pub fn conflict_style_merge(&mut self, on: bool) + -> &mut CheckoutBuilder<'cb> { + self.flag(raw::GIT_CHECKOUT_CONFLICT_STYLE_MERGE, on) + } + + /// Specify for which notification types to invoke the notification + /// callback. + /// + /// Defaults to none. + pub fn notify_on(&mut self, notification_types: CheckoutNotificationType) + -> &mut CheckoutBuilder<'cb> { + self.notify_flags = notification_types; + self + } + + /// Indicates whether to include common ancestor data in diff3 format files + /// for conflicts. + /// + /// Defaults to false. + pub fn conflict_style_diff3(&mut self, on: bool) + -> &mut CheckoutBuilder<'cb> { + self.flag(raw::GIT_CHECKOUT_CONFLICT_STYLE_DIFF3, on) + } + + /// Indicate whether to apply filters like CRLF conversion. + pub fn disable_filters(&mut self, disable: bool) + -> &mut CheckoutBuilder<'cb> { + self.disable_filters = disable; + self + } + + /// Set the mode with which new directories are created. + /// + /// Default is 0755 + pub fn dir_perm(&mut self, perm: i32) -> &mut CheckoutBuilder<'cb> { + self.dir_perm = Some(perm); + self + } + + /// Set the mode with which new files are created. + /// + /// The default is 0644 or 0755 as dictated by the blob. + pub fn file_perm(&mut self, perm: i32) -> &mut CheckoutBuilder<'cb> { + self.file_perm = Some(perm); + self + } + + /// Add a path to be checked out. + /// + /// If no paths are specified, then all files are checked out. Otherwise + /// only these specified paths are checked out. 
+ pub fn path(&mut self, path: T) + -> &mut CheckoutBuilder<'cb> { + let path = path.into_c_string().unwrap(); + self.path_ptrs.push(path.as_ptr()); + self.paths.push(path); + self + } + + /// Set the directory to check out to + pub fn target_dir(&mut self, dst: &Path) -> &mut CheckoutBuilder<'cb> { + self.target_dir = Some(dst.into_c_string().unwrap()); + self + } + + /// The name of the common ancestor side of conflicts + pub fn ancestor_label(&mut self, label: &str) -> &mut CheckoutBuilder<'cb> { + self.ancestor_label = Some(CString::new(label).unwrap()); + self + } + + /// The name of the common our side of conflicts + pub fn our_label(&mut self, label: &str) -> &mut CheckoutBuilder<'cb> { + self.our_label = Some(CString::new(label).unwrap()); + self + } + + /// The name of the common their side of conflicts + pub fn their_label(&mut self, label: &str) -> &mut CheckoutBuilder<'cb> { + self.their_label = Some(CString::new(label).unwrap()); + self + } + + /// Set a callback to receive notifications of checkout progress. + pub fn progress(&mut self, cb: F) -> &mut CheckoutBuilder<'cb> + where F: FnMut(Option<&Path>, usize, usize) + 'cb { + self.progress = Some(Box::new(cb) as Box>); + self + } + + /// Set a callback to receive checkout notifications. + /// + /// Callbacks are invoked prior to modifying any files on disk. + /// Returning `false` from the callback will cancel the checkout. + pub fn notify(&mut self, cb: F) -> &mut CheckoutBuilder<'cb> + where F: FnMut(CheckoutNotificationType, Option<&Path>, DiffFile, + DiffFile, DiffFile) -> bool + 'cb + { + self.notify = Some(Box::new(cb) as Box>); + self + } + + /// Configure a raw checkout options based on this configuration. + /// + /// This method is unsafe as there is no guarantee that this structure will + /// outlive the provided checkout options. 
+ pub unsafe fn configure(&mut self, opts: &mut raw::git_checkout_options) { + opts.version = raw::GIT_CHECKOUT_OPTIONS_VERSION; + opts.disable_filters = self.disable_filters as c_int; + opts.dir_mode = self.dir_perm.unwrap_or(0) as c_uint; + opts.file_mode = self.file_perm.unwrap_or(0) as c_uint; + + if self.path_ptrs.len() > 0 { + opts.paths.strings = self.path_ptrs.as_ptr() as *mut _; + opts.paths.count = self.path_ptrs.len() as size_t; + } + + match self.target_dir { + Some(ref c) => opts.target_directory = c.as_ptr(), + None => {} + } + match self.ancestor_label { + Some(ref c) => opts.ancestor_label = c.as_ptr(), + None => {} + } + match self.our_label { + Some(ref c) => opts.our_label = c.as_ptr(), + None => {} + } + match self.their_label { + Some(ref c) => opts.their_label = c.as_ptr(), + None => {} + } + if self.progress.is_some() { + let f: raw::git_checkout_progress_cb = progress_cb; + opts.progress_cb = Some(f); + opts.progress_payload = self as *mut _ as *mut _; + } + if self.notify.is_some() { + let f: raw::git_checkout_notify_cb = notify_cb; + opts.notify_cb = Some(f); + opts.notify_payload = self as *mut _ as *mut _; + opts.notify_flags = self.notify_flags.bits() as c_uint; + } + opts.checkout_strategy = self.checkout_opts as c_uint; + } +} + +extern fn progress_cb(path: *const c_char, + completed: size_t, + total: size_t, + data: *mut c_void) { + panic::wrap(|| unsafe { + let payload = &mut *(data as *mut CheckoutBuilder); + let callback = match payload.progress { + Some(ref mut c) => c, + None => return, + }; + let path = if path.is_null() { + None + } else { + Some(util::bytes2path(CStr::from_ptr(path).to_bytes())) + }; + callback(path, completed as usize, total as usize) + }); +} + +extern fn notify_cb(why: raw::git_checkout_notify_t, + path: *const c_char, + baseline: *const raw::git_diff_file, + target: *const raw::git_diff_file, + workdir: *const raw::git_diff_file, + data: *mut c_void) -> c_int { + // pack callback etc + panic::wrap(|| 
unsafe { + let payload = &mut *(data as *mut CheckoutBuilder); + let callback = match payload.notify { + Some(ref mut c) => c, + None => return 0, + }; + let path = if path.is_null() { + None + } else { + Some(util::bytes2path(CStr::from_ptr(path).to_bytes())) + }; + + let why = CheckoutNotificationType::from_bits_truncate(why as u32); + let keep_going = callback(why, + path, + DiffFile::from_raw(baseline), + DiffFile::from_raw(target), + DiffFile::from_raw(workdir)); + if keep_going {0} else {1} + }).unwrap_or(2) +} + +#[cfg(test)] +mod tests { + use std::fs; + use std::path::Path; + use tempdir::TempDir; + use super::RepoBuilder; + use Repository; + + #[test] + fn smoke() { + let r = RepoBuilder::new().clone("/path/to/nowhere", Path::new("foo")); + assert!(r.is_err()); + } + + #[test] + fn smoke2() { + let td = TempDir::new("test").unwrap(); + Repository::init_bare(&td.path().join("bare")).unwrap(); + let url = if cfg!(unix) { + format!("file://{}/bare", td.path().display()) + } else { + format!("file:///{}/bare", td.path().display().to_string() + .replace("\\", "/")) + }; + + let dst = td.path().join("foo"); + RepoBuilder::new().clone(&url, &dst).unwrap(); + fs::remove_dir_all(&dst).unwrap(); + RepoBuilder::new().local(false).clone(&url, &dst).unwrap(); + fs::remove_dir_all(&dst).unwrap(); + RepoBuilder::new().local(false).hardlinks(false).bare(true) + .clone(&url, &dst).unwrap(); + fs::remove_dir_all(&dst).unwrap(); + assert!(RepoBuilder::new().branch("foo") + .clone(&url, &dst).is_err()); + } + +} diff -Nru cargo-0.17.0/vendor/git2-0.6.4/src/call.rs cargo-0.19.0/vendor/git2-0.6.4/src/call.rs --- cargo-0.17.0/vendor/git2-0.6.4/src/call.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/git2-0.6.4/src/call.rs 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,217 @@ +#![macro_use] +use libc; + +use Error; + +macro_rules! call { + (raw::$p:ident ($($e:expr),*)) => ( + raw::$p($(::call::convert(&$e)),*) + ) +} + +macro_rules! 
try_call { + (raw::$p:ident ($($e:expr),*)) => ({ + match ::call::try(raw::$p($(::call::convert(&$e)),*)) { + Ok(o) => o, + Err(e) => { ::panic::check(); return Err(e) } + } + }) +} + +macro_rules! try_call_iter { + ($($f:tt)*) => { + match call!($($f)*) { + 0 => {} + raw::GIT_ITEROVER => return None, + e => return Some(Err(::call::last_error(e))) + } + } +} + +#[doc(hidden)] +pub trait Convert { + fn convert(&self) -> T; +} + +pub fn convert>(u: &U) -> T { u.convert() } + +pub fn try(ret: libc::c_int) -> Result { + match ret { + n if n < 0 => Err(last_error(n)), + n => Ok(n), + } +} + +pub fn last_error(code: libc::c_int) -> Error { + // Apparently libgit2 isn't necessarily guaranteed to set the last error + // whenever a function returns a negative value! + Error::last_error(code).unwrap_or_else(|| { + Error::from_str("an unknown error occurred") + }) +} + +mod impls { + use std::ffi::CString; + use libc; + + use {raw, ConfigLevel, ResetType, ObjectType, BranchType, Direction}; + use {DiffFormat, FileFavor, SubmoduleIgnore, AutotagOption, FetchPrune}; + use call::Convert; + + impl Convert for T { + fn convert(&self) -> T { *self } + } + + impl Convert for bool { + fn convert(&self) -> libc::c_int { *self as libc::c_int } + } + impl<'a, T> Convert<*const T> for &'a T { + fn convert(&self) -> *const T { *self as *const T } + } + impl<'a, T> Convert<*mut T> for &'a mut T { + fn convert(&self) -> *mut T { &**self as *const T as *mut T } + } + impl Convert<*const T> for *mut T { + fn convert(&self) -> *const T { *self as *const T } + } + + impl Convert<*const libc::c_char> for CString { + fn convert(&self) -> *const libc::c_char { self.as_ptr() } + } + + impl> Convert<*const T> for Option { + fn convert(&self) -> *const T { + self.as_ref().map(|s| s.convert()).unwrap_or(0 as *const _) + } + } + + impl> Convert<*mut T> for Option { + fn convert(&self) -> *mut T { + self.as_ref().map(|s| s.convert()).unwrap_or(0 as *mut _) + } + } + + impl Convert for ResetType { + fn 
convert(&self) -> raw::git_reset_t { + match *self { + ResetType::Soft => raw::GIT_RESET_SOFT, + ResetType::Hard => raw::GIT_RESET_HARD, + ResetType::Mixed => raw::GIT_RESET_MIXED, + } + } + } + + impl Convert for Direction { + fn convert(&self) -> raw::git_direction { + match *self { + Direction::Push => raw::GIT_DIRECTION_PUSH, + Direction::Fetch => raw::GIT_DIRECTION_FETCH, + } + } + } + + impl Convert for ObjectType { + fn convert(&self) -> raw::git_otype { + match *self { + ObjectType::Any => raw::GIT_OBJ_ANY, + ObjectType::Commit => raw::GIT_OBJ_COMMIT, + ObjectType::Tree => raw::GIT_OBJ_TREE, + ObjectType::Blob => raw::GIT_OBJ_BLOB, + ObjectType::Tag => raw::GIT_OBJ_TAG, + } + } + } + + impl Convert for Option { + fn convert(&self) -> raw::git_otype { + self.unwrap_or(ObjectType::Any).convert() + } + } + + impl Convert for BranchType { + fn convert(&self) -> raw::git_branch_t { + match *self { + BranchType::Remote => raw::GIT_BRANCH_REMOTE, + BranchType::Local => raw::GIT_BRANCH_LOCAL, + } + } + } + + impl Convert for Option { + fn convert(&self) -> raw::git_branch_t { + self.map(|s| s.convert()).unwrap_or(raw::GIT_BRANCH_ALL) + } + } + + impl Convert for ConfigLevel { + fn convert(&self) -> raw::git_config_level_t { + match *self { + ConfigLevel::ProgramData => raw::GIT_CONFIG_LEVEL_PROGRAMDATA, + ConfigLevel::System => raw::GIT_CONFIG_LEVEL_SYSTEM, + ConfigLevel::XDG => raw::GIT_CONFIG_LEVEL_XDG, + ConfigLevel::Global => raw::GIT_CONFIG_LEVEL_GLOBAL, + ConfigLevel::Local => raw::GIT_CONFIG_LEVEL_LOCAL, + ConfigLevel::App => raw::GIT_CONFIG_LEVEL_APP, + ConfigLevel::Highest => raw::GIT_CONFIG_HIGHEST_LEVEL, + } + } + } + + impl Convert for DiffFormat { + fn convert(&self) -> raw::git_diff_format_t { + match *self { + DiffFormat::Patch => raw::GIT_DIFF_FORMAT_PATCH, + DiffFormat::PatchHeader => raw::GIT_DIFF_FORMAT_PATCH_HEADER, + DiffFormat::Raw => raw::GIT_DIFF_FORMAT_RAW, + DiffFormat::NameOnly => raw::GIT_DIFF_FORMAT_NAME_ONLY, + DiffFormat::NameStatus 
=> raw::GIT_DIFF_FORMAT_NAME_STATUS, + } + } + } + + impl Convert for FileFavor { + fn convert(&self) -> raw::git_merge_file_favor_t { + match *self { + FileFavor::Normal => raw::GIT_MERGE_FILE_FAVOR_NORMAL, + FileFavor::Ours => raw::GIT_MERGE_FILE_FAVOR_OURS, + FileFavor::Theirs => raw::GIT_MERGE_FILE_FAVOR_THEIRS, + FileFavor::Union => raw::GIT_MERGE_FILE_FAVOR_UNION, + } + } + } + + impl Convert for SubmoduleIgnore { + fn convert(&self) -> raw::git_submodule_ignore_t { + match *self { + SubmoduleIgnore::Unspecified => + raw::GIT_SUBMODULE_IGNORE_UNSPECIFIED, + SubmoduleIgnore::None => raw::GIT_SUBMODULE_IGNORE_NONE, + SubmoduleIgnore::Untracked => raw::GIT_SUBMODULE_IGNORE_UNTRACKED, + SubmoduleIgnore::Dirty => raw::GIT_SUBMODULE_IGNORE_DIRTY, + SubmoduleIgnore::All => raw::GIT_SUBMODULE_IGNORE_ALL, + } + } + } + + impl Convert for AutotagOption { + fn convert(&self) -> raw::git_remote_autotag_option_t { + match *self { + AutotagOption::Unspecified => + raw::GIT_REMOTE_DOWNLOAD_TAGS_UNSPECIFIED, + AutotagOption::None => raw::GIT_REMOTE_DOWNLOAD_TAGS_NONE, + AutotagOption::Auto => raw::GIT_REMOTE_DOWNLOAD_TAGS_AUTO, + AutotagOption::All => raw::GIT_REMOTE_DOWNLOAD_TAGS_ALL, + } + } + } + + impl Convert for FetchPrune { + fn convert(&self) -> raw::git_fetch_prune_t { + match *self { + FetchPrune::Unspecified => raw::GIT_FETCH_PRUNE_UNSPECIFIED, + FetchPrune::On => raw::GIT_FETCH_PRUNE, + FetchPrune::Off => raw::GIT_FETCH_NO_PRUNE, + } + } + } +} diff -Nru cargo-0.17.0/vendor/git2-0.6.4/src/cert.rs cargo-0.19.0/vendor/git2-0.6.4/src/cert.rs --- cargo-0.17.0/vendor/git2-0.6.4/src/cert.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/git2-0.6.4/src/cert.rs 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,97 @@ +//! Certificate types which are passed to `CertificateCheck` in +//! `RemoteCallbacks`. 
+ +use std::marker; +use std::mem; +use std::slice; + +use raw; +use util::Binding; + +/// A certificate for a remote connection, viewable as one of `CertHostkey` or +/// `CertX509` currently. +pub struct Cert<'a> { + raw: *mut raw::git_cert, + _marker: marker::PhantomData<&'a raw::git_cert>, +} + +/// Hostkey information taken from libssh2 +pub struct CertHostkey<'a> { + raw: *mut raw::git_cert_hostkey, + _marker: marker::PhantomData<&'a raw::git_cert>, +} + +/// X.509 certificate information +pub struct CertX509<'a> { + raw: *mut raw::git_cert_x509, + _marker: marker::PhantomData<&'a raw::git_cert>, +} + +impl<'a> Cert<'a> { + /// Attempt to view this certificate as an SSH hostkey. + /// + /// Returns `None` if this is not actually an SSH hostkey. + pub fn as_hostkey(&self) -> Option<&CertHostkey<'a>> { + self.cast(raw::GIT_CERT_HOSTKEY_LIBSSH2) + } + + /// Attempt to view this certificate as an X.509 certificate. + /// + /// Returns `None` if this is not actually an X.509 certificate. + pub fn as_x509(&self) -> Option<&CertX509<'a>> { + self.cast(raw::GIT_CERT_X509) + } + + fn cast(&self, kind: raw::git_cert_t) -> Option<&T> { + assert_eq!(mem::size_of::>(), mem::size_of::()); + unsafe { + if kind == (*self.raw).cert_type { + Some(&*(self as *const Cert<'a> as *const T)) + } else { + None + } + } + } +} + +impl<'a> CertHostkey<'a> { + /// Returns the md5 hash of the hostkey, if available. + pub fn hash_md5(&self) -> Option<&[u8; 16]> { + unsafe { + if (*self.raw).kind as u32 & raw::GIT_CERT_SSH_MD5 as u32 == 0 { + None + } else { + Some(&(*self.raw).hash_md5) + } + } + } + + /// Returns the SHA-1 hash of the hostkey, if available. 
+ pub fn hash_sha1(&self) -> Option<&[u8; 20]> { + unsafe { + if (*self.raw).kind as u32 & raw::GIT_CERT_SSH_SHA1 as u32 == 0 { + None + } else { + Some(&(*self.raw).hash_sha1) + } + } + } +} + +impl<'a> CertX509<'a> { + /// Return the X.509 certificate data as a byte slice + pub fn data(&self) -> &[u8] { + unsafe { + slice::from_raw_parts((*self.raw).data as *const u8, + (*self.raw).len as usize) + } + } +} + +impl<'a> Binding for Cert<'a> { + type Raw = *mut raw::git_cert; + unsafe fn from_raw(raw: *mut raw::git_cert) -> Cert<'a> { + Cert { raw: raw, _marker: marker::PhantomData } + } + fn raw(&self) -> *mut raw::git_cert { self.raw } +} diff -Nru cargo-0.17.0/vendor/git2-0.6.4/src/commit.rs cargo-0.19.0/vendor/git2-0.6.4/src/commit.rs --- cargo-0.17.0/vendor/git2-0.6.4/src/commit.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/git2-0.6.4/src/commit.rs 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,340 @@ +use std::marker; +use std::mem; +use std::ops::Range; +use std::str; +use libc; + +use {raw, signature, Oid, Error, Signature, Tree, Time, Object}; +use util::Binding; + +/// A structure to represent a git [commit][1] +/// +/// [1]: http://git-scm.com/book/en/Git-Internals-Git-Objects +pub struct Commit<'repo> { + raw: *mut raw::git_commit, + _marker: marker::PhantomData>, +} + +/// An iterator over the parent commits of a commit. +pub struct Parents<'commit, 'repo: 'commit> { + range: Range, + commit: &'commit Commit<'repo>, +} + +/// An iterator over the parent commits' ids of a commit. +pub struct ParentIds<'commit> { + range: Range, + commit: &'commit Commit<'commit>, +} + +impl<'repo> Commit<'repo> { + /// Get the id (SHA1) of a repository commit + pub fn id(&self) -> Oid { + unsafe { Binding::from_raw(raw::git_commit_id(&*self.raw)) } + } + + /// Get the id of the tree pointed to by this commit. + /// + /// No attempts are made to fetch an object from the ODB. 
+ pub fn tree_id(&self) -> Oid { + unsafe { Binding::from_raw(raw::git_commit_tree_id(&*self.raw)) } + } + + /// Get the tree pointed to by a commit. + pub fn tree(&self) -> Result, Error> { + let mut ret = 0 as *mut raw::git_tree; + unsafe { + try_call!(raw::git_commit_tree(&mut ret, &*self.raw)); + Ok(Binding::from_raw(ret)) + } + } + + /// Get access to the underlying raw pointer. + pub fn raw(&self) -> *mut raw::git_commit { self.raw } + + /// Get the full message of a commit. + /// + /// The returned message will be slightly prettified by removing any + /// potential leading newlines. + /// + /// `None` will be returned if the message is not valid utf-8 + pub fn message(&self) -> Option<&str> { + str::from_utf8(self.message_bytes()).ok() + } + + /// Get the full message of a commit as a byte slice. + /// + /// The returned message will be slightly prettified by removing any + /// potential leading newlines. + pub fn message_bytes(&self) -> &[u8] { + unsafe { + ::opt_bytes(self, raw::git_commit_message(&*self.raw)).unwrap() + } + } + + /// Get the encoding for the message of a commit, as a string representing a + /// standard encoding name. + /// + /// `None` will be returned if the encoding is not known + pub fn message_encoding(&self) -> Option<&str> { + let bytes = unsafe { + ::opt_bytes(self, raw::git_commit_message(&*self.raw)) + }; + bytes.map(|b| str::from_utf8(b).unwrap()) + } + + /// Get the full raw message of a commit. + /// + /// `None` will be returned if the message is not valid utf-8 + pub fn message_raw(&self) -> Option<&str> { + str::from_utf8(self.message_raw_bytes()).ok() + } + + /// Get the full raw message of a commit. + pub fn message_raw_bytes(&self) -> &[u8] { + unsafe { + ::opt_bytes(self, raw::git_commit_message_raw(&*self.raw)).unwrap() + } + } + + /// Get the full raw text of the commit header. 
+ /// + /// `None` will be returned if the message is not valid utf-8 + pub fn raw_header(&self) -> Option<&str> { + str::from_utf8(self.raw_header_bytes()).ok() + } + + /// Get the full raw text of the commit header. + pub fn raw_header_bytes(&self) -> &[u8] { + unsafe { + ::opt_bytes(self, raw::git_commit_raw_header(&*self.raw)).unwrap() + } + } + + /// Get the short "summary" of the git commit message. + /// + /// The returned message is the summary of the commit, comprising the first + /// paragraph of the message with whitespace trimmed and squashed. + /// + /// `None` may be returned if an error occurs or if the summary is not valid + /// utf-8. + pub fn summary(&mut self) -> Option<&str> { + self.summary_bytes().and_then(|s| str::from_utf8(s).ok()) + } + + /// Get the short "summary" of the git commit message. + /// + /// The returned message is the summary of the commit, comprising the first + /// paragraph of the message with whitespace trimmed and squashed. + /// + /// `None` may be returned if an error occurs + pub fn summary_bytes(&mut self) -> Option<&[u8]> { + unsafe { ::opt_bytes(self, raw::git_commit_summary(self.raw)) } + } + + /// Get the commit time (i.e. committer time) of a commit. + /// + /// The first element of the tuple is the time, in seconds, since the epoch. + /// The second element is the offset, in minutes, of the time zone of the + /// committer's preferred time zone. + pub fn time(&self) -> Time { + unsafe { + Time::new(raw::git_commit_time(&*self.raw) as i64, + raw::git_commit_time_offset(&*self.raw) as i32) + } + } + + /// Creates a new iterator over the parents of this commit. + pub fn parents<'a>(&'a self) -> Parents<'a, 'repo> { + let max = unsafe { raw::git_commit_parentcount(&*self.raw) as usize }; + Parents { range: 0..max, commit: self } + } + + /// Creates a new iterator over the parents of this commit. 
+ pub fn parent_ids(&self) -> ParentIds { + let max = unsafe { raw::git_commit_parentcount(&*self.raw) as usize }; + ParentIds { range: 0..max, commit: self } + } + + /// Get the author of this commit. + pub fn author(&self) -> Signature { + unsafe { + let ptr = raw::git_commit_author(&*self.raw); + signature::from_raw_const(self, ptr) + } + } + + /// Get the committer of this commit. + pub fn committer(&self) -> Signature { + unsafe { + let ptr = raw::git_commit_committer(&*self.raw); + signature::from_raw_const(self, ptr) + } + } + + /// Amend this existing commit with all non-`None` values + /// + /// This creates a new commit that is exactly the same as the old commit, + /// except that any non-`None` values will be updated. The new commit has + /// the same parents as the old commit. + /// + /// For information about `update_ref`, see `new`. + pub fn amend(&self, + update_ref: Option<&str>, + author: Option<&Signature>, + committer: Option<&Signature>, + message_encoding: Option<&str>, + message: Option<&str>, + tree: Option<&Tree<'repo>>) -> Result { + let mut raw = raw::git_oid { id: [0; raw::GIT_OID_RAWSZ] }; + let update_ref = try!(::opt_cstr(update_ref)); + let encoding = try!(::opt_cstr(message_encoding)); + let message = try!(::opt_cstr(message)); + unsafe { + try_call!(raw::git_commit_amend(&mut raw, + self.raw(), + update_ref, + author.map(|s| s.raw()), + committer.map(|s| s.raw()), + encoding, + message, + tree.map(|t| t.raw()))); + Ok(Binding::from_raw(&raw as *const _)) + } + } + + /// Get the specified parent of the commit. + /// + /// Use the `parents` iterator to return an iterator over all parents. + pub fn parent(&self, i: usize) -> Result, Error> { + unsafe { + let mut raw = 0 as *mut raw::git_commit; + try_call!(raw::git_commit_parent(&mut raw, &*self.raw, + i as libc::c_uint)); + Ok(Binding::from_raw(raw)) + } + } + + /// Get the specified parent id of the commit. 
+ /// + /// This is different from `parent`, which will attemptstempt to load the + /// parent commit from the ODB. + /// + /// Use the `parent_ids` iterator to return an iterator over all parents. + pub fn parent_id(&self, i: usize) -> Result { + unsafe { + let id = raw::git_commit_parent_id(self.raw, i as libc::c_uint); + if id.is_null() { + Err(Error::from_str("parent index out of bounds")) + } else { + Ok(Binding::from_raw(id)) + } + } + } + + /// Casts this Commit to be usable as an `Object` + pub fn as_object(&self) -> &Object<'repo> { + unsafe { + &*(self as *const _ as *const Object<'repo>) + } + } + + /// Consumes Commit to be returned as an `Object` + pub fn into_object(self) -> Object<'repo> { + assert_eq!(mem::size_of_val(&self), mem::size_of::()); + unsafe { + mem::transmute(self) + } + } +} + +impl<'repo> Binding for Commit<'repo> { + type Raw = *mut raw::git_commit; + unsafe fn from_raw(raw: *mut raw::git_commit) -> Commit<'repo> { + Commit { + raw: raw, + _marker: marker::PhantomData, + } + } + fn raw(&self) -> *mut raw::git_commit { self.raw } +} + + +impl<'repo, 'commit> Iterator for Parents<'commit, 'repo> { + type Item = Commit<'repo>; + fn next(&mut self) -> Option> { + self.range.next().map(|i| self.commit.parent(i).unwrap()) + } + fn size_hint(&self) -> (usize, Option) { self.range.size_hint() } +} + +impl<'repo, 'commit> DoubleEndedIterator for Parents<'commit, 'repo> { + fn next_back(&mut self) -> Option> { + self.range.next_back().map(|i| self.commit.parent(i).unwrap()) + } +} + +impl<'repo, 'commit> ExactSizeIterator for Parents<'commit, 'repo> {} + +impl<'commit> Iterator for ParentIds<'commit> { + type Item = Oid; + fn next(&mut self) -> Option { + self.range.next().map(|i| self.commit.parent_id(i).unwrap()) + } + fn size_hint(&self) -> (usize, Option) { self.range.size_hint() } +} + +impl<'commit> DoubleEndedIterator for ParentIds<'commit> { + fn next_back(&mut self) -> Option { + self.range.next_back().map(|i| 
self.commit.parent_id(i).unwrap()) + } +} + +impl<'commit> ExactSizeIterator for ParentIds<'commit> {} + +impl<'repo> Drop for Commit<'repo> { + fn drop(&mut self) { + unsafe { raw::git_commit_free(self.raw) } + } +} + +#[cfg(test)] +mod tests { + #[test] + fn smoke() { + let (_td, repo) = ::test::repo_init(); + let head = repo.head().unwrap(); + let target = head.target().unwrap(); + let mut commit = repo.find_commit(target).unwrap(); + assert_eq!(commit.message(), Some("initial")); + assert_eq!(commit.id(), target); + commit.message_raw().unwrap(); + commit.raw_header().unwrap(); + commit.message_encoding(); + commit.summary().unwrap(); + commit.tree_id(); + commit.tree().unwrap(); + assert_eq!(commit.parents().count(), 0); + + assert_eq!(commit.author().name(), Some("name")); + assert_eq!(commit.author().email(), Some("email")); + assert_eq!(commit.committer().name(), Some("name")); + assert_eq!(commit.committer().email(), Some("email")); + + let sig = repo.signature().unwrap(); + let tree = repo.find_tree(commit.tree_id()).unwrap(); + let id = repo.commit(Some("HEAD"), &sig, &sig, "bar", &tree, + &[&commit]).unwrap(); + let head = repo.find_commit(id).unwrap(); + + let new_head = head.amend(Some("HEAD"), None, None, None, + Some("new message"), None).unwrap(); + let new_head = repo.find_commit(new_head).unwrap(); + assert_eq!(new_head.message(), Some("new message")); + new_head.into_object(); + + repo.find_object(target, None).unwrap().as_commit().unwrap(); + repo.find_object(target, None).unwrap().into_commit().ok().unwrap(); + } +} + diff -Nru cargo-0.17.0/vendor/git2-0.6.4/src/config.rs cargo-0.19.0/vendor/git2-0.6.4/src/config.rs --- cargo-0.17.0/vendor/git2-0.6.4/src/config.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/git2-0.6.4/src/config.rs 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,607 @@ +use std::ffi::CString; +use std::marker; +use std::path::{Path, PathBuf}; +use std::str; +use libc; + +use {raw, Error, ConfigLevel, Buf, 
IntoCString}; +use util::{self, Binding}; + +/// A structure representing a git configuration key/value store +pub struct Config { + raw: *mut raw::git_config, +} + +/// A struct representing a certain entry owned by a `Config` instance. +/// +/// An entry has a name, a value, and a level it applies to. +pub struct ConfigEntry<'cfg> { + raw: *mut raw::git_config_entry, + _marker: marker::PhantomData<&'cfg Config>, + owned: bool, +} + +/// An iterator over the `ConfigEntry` values of a `Config` structure. +pub struct ConfigEntries<'cfg> { + raw: *mut raw::git_config_iterator, + _marker: marker::PhantomData<&'cfg Config>, +} + +impl Config { + /// Allocate a new configuration object + /// + /// This object is empty, so you have to add a file to it before you can do + /// anything with it. + pub fn new() -> Result { + ::init(); + let mut raw = 0 as *mut raw::git_config; + unsafe { + try_call!(raw::git_config_new(&mut raw)); + Ok(Binding::from_raw(raw)) + } + } + + /// Create a new config instance containing a single on-disk file + pub fn open(path: &Path) -> Result { + ::init(); + let mut raw = 0 as *mut raw::git_config; + let path = try!(path.into_c_string()); + unsafe { + try_call!(raw::git_config_open_ondisk(&mut raw, path)); + Ok(Binding::from_raw(raw)) + } + } + + /// Open the global, XDG and system configuration files + /// + /// Utility wrapper that finds the global, XDG and system configuration + /// files and opens them into a single prioritized config object that can + /// be used when accessing default config data outside a repository. + pub fn open_default() -> Result { + ::init(); + let mut raw = 0 as *mut raw::git_config; + unsafe { + try_call!(raw::git_config_open_default(&mut raw)); + Ok(Binding::from_raw(raw)) + } + } + + /// Locate the path to the global configuration file + /// + /// The user or global configuration file is usually located in + /// `$HOME/.gitconfig`. 
+ /// + /// This method will try to guess the full path to that file, if the file + /// exists. The returned path may be used on any method call to load + /// the global configuration file. + /// + /// This method will not guess the path to the xdg compatible config file + /// (`.config/git/config`). + pub fn find_global() -> Result { + ::init(); + let buf = Buf::new(); + unsafe { try_call!(raw::git_config_find_global(buf.raw())); } + Ok(util::bytes2path(&buf).to_path_buf()) + } + + /// Locate the path to the system configuration file + /// + /// If /etc/gitconfig doesn't exist, it will look for %PROGRAMFILES% + pub fn find_system() -> Result { + ::init(); + let buf = Buf::new(); + unsafe { try_call!(raw::git_config_find_system(buf.raw())); } + Ok(util::bytes2path(&buf).to_path_buf()) + } + + /// Locate the path to the global xdg compatible configuration file + /// + /// The xdg compatible configuration file is usually located in + /// `$HOME/.config/git/config`. + pub fn find_xdg() -> Result { + ::init(); + let buf = Buf::new(); + unsafe { try_call!(raw::git_config_find_xdg(buf.raw())); } + Ok(util::bytes2path(&buf).to_path_buf()) + } + + /// Add an on-disk config file instance to an existing config + /// + /// The on-disk file pointed at by path will be opened and parsed; it's + /// expected to be a native Git config file following the default Git config + /// syntax (see man git-config). + /// + /// Further queries on this config object will access each of the config + /// file instances in order (instances with a higher priority level will be + /// accessed first). + pub fn add_file(&mut self, path: &Path, level: ConfigLevel, + force: bool) -> Result<(), Error> { + let path = try!(path.into_c_string()); + unsafe { + try_call!(raw::git_config_add_file_ondisk(self.raw, path, level, + force)); + Ok(()) + } + } + + /// Delete a config variable from the config file with the highest level + /// (usually the local one). 
+ pub fn remove(&mut self, name: &str) -> Result<(), Error> { + let name = try!(CString::new(name)); + unsafe { + try_call!(raw::git_config_delete_entry(self.raw, name)); + Ok(()) + } + } + + /// Get the value of a boolean config variable. + /// + /// All config files will be looked into, in the order of their defined + /// level. A higher level means a higher priority. The first occurrence of + /// the variable will be returned here. + pub fn get_bool(&self, name: &str) -> Result { + let mut out = 0 as libc::c_int; + let name = try!(CString::new(name)); + unsafe { + try_call!(raw::git_config_get_bool(&mut out, &*self.raw, name)); + + } + Ok(if out == 0 {false} else {true}) + } + + /// Get the value of an integer config variable. + /// + /// All config files will be looked into, in the order of their defined + /// level. A higher level means a higher priority. The first occurrence of + /// the variable will be returned here. + pub fn get_i32(&self, name: &str) -> Result { + let mut out = 0i32; + let name = try!(CString::new(name)); + unsafe { + try_call!(raw::git_config_get_int32(&mut out, &*self.raw, name)); + + } + Ok(out) + } + + /// Get the value of an integer config variable. + /// + /// All config files will be looked into, in the order of their defined + /// level. A higher level means a higher priority. The first occurrence of + /// the variable will be returned here. + pub fn get_i64(&self, name: &str) -> Result { + let mut out = 0i64; + let name = try!(CString::new(name)); + unsafe { + try_call!(raw::git_config_get_int64(&mut out, &*self.raw, name)); + } + Ok(out) + } + + /// Get the value of a string config variable. + /// + /// This is the same as `get_bytes` except that it may return `Err` if + /// the bytes are not valid utf-8. 
+ pub fn get_str(&self, name: &str) -> Result<&str, Error> { + str::from_utf8(try!(self.get_bytes(name))).map_err(|_| { + Error::from_str("configuration value is not valid utf8") + }) + } + + /// Get the value of a string config variable as a byte slice. + /// + /// This method will return an error if this `Config` is not a snapshot. + pub fn get_bytes(&self, name: &str) -> Result<&[u8], Error> { + let mut ret = 0 as *const libc::c_char; + let name = try!(CString::new(name)); + unsafe { + try_call!(raw::git_config_get_string(&mut ret, &*self.raw, name)); + Ok(::opt_bytes(self, ret).unwrap()) + } + } + + /// Get the value of a string config variable as an owned string. + /// + /// An error will be returned if the config value is not valid utf-8. + pub fn get_string(&self, name: &str) -> Result { + let ret = Buf::new(); + let name = try!(CString::new(name)); + unsafe { + try_call!(raw::git_config_get_string_buf(ret.raw(), self.raw, name)); + } + str::from_utf8(&ret).map(|s| s.to_string()).map_err(|_| { + Error::from_str("configuration value is not valid utf8") + }) + } + + /// Get the value of a path config variable as an owned . + pub fn get_path(&self, name: &str) -> Result { + let ret = Buf::new(); + let name = try!(CString::new(name)); + unsafe { + try_call!(raw::git_config_get_path(ret.raw(), self.raw, name)); + } + Ok(::util::bytes2path(&ret).to_path_buf()) + } + + /// Get the ConfigEntry for a config variable. + pub fn get_entry(&self, name: &str) -> Result { + let mut ret = 0 as *mut raw::git_config_entry; + let name = try!(CString::new(name)); + unsafe { + try_call!(raw::git_config_get_entry(&mut ret, self.raw, name)); + Ok(Binding::from_raw(ret)) + } + } + + /// Iterate over all the config variables + /// + /// If `glob` is `Some`, then the iterator will only iterate over all + /// variables whose name matches the pattern. 
+ /// + /// # Example + /// + /// ``` + /// # #![allow(unstable)] + /// use git2::Config; + /// + /// let cfg = Config::new().unwrap(); + /// + /// for entry in &cfg.entries(None).unwrap() { + /// let entry = entry.unwrap(); + /// println!("{} => {}", entry.name().unwrap(), entry.value().unwrap()); + /// } + /// ``` + pub fn entries(&self, glob: Option<&str>) -> Result { + let mut ret = 0 as *mut raw::git_config_iterator; + unsafe { + match glob { + Some(s) => { + let s = try!(CString::new(s)); + try_call!(raw::git_config_iterator_glob_new(&mut ret, + &*self.raw, + s)); + } + None => { + try_call!(raw::git_config_iterator_new(&mut ret, &*self.raw)); + } + } + Ok(Binding::from_raw(ret)) + } + } + + /// Open the global/XDG configuration file according to git's rules + /// + /// Git allows you to store your global configuration at `$HOME/.config` or + /// `$XDG_CONFIG_HOME/git/config`. For backwards compatability, the XDG file + /// shouldn't be used unless the use has created it explicitly. With this + /// function you'll open the correct one to write to. + pub fn open_global(&mut self) -> Result { + let mut raw = 0 as *mut raw::git_config; + unsafe { + try_call!(raw::git_config_open_global(&mut raw, self.raw)); + Ok(Binding::from_raw(raw)) + } + } + + /// Build a single-level focused config object from a multi-level one. + /// + /// The returned config object can be used to perform get/set/delete + /// operations on a single specific level. + pub fn open_level(&self, level: ConfigLevel) -> Result { + let mut raw = 0 as *mut raw::git_config; + unsafe { + try_call!(raw::git_config_open_level(&mut raw, &*self.raw, level)); + Ok(Binding::from_raw(raw)) + } + } + + /// Set the value of a boolean config variable in the config file with the + /// highest level (usually the local one). 
+ pub fn set_bool(&mut self, name: &str, value: bool) -> Result<(), Error> { + let name = try!(CString::new(name)); + unsafe { + try_call!(raw::git_config_set_bool(self.raw, name, value)); + } + Ok(()) + } + + /// Set the value of an integer config variable in the config file with the + /// highest level (usually the local one). + pub fn set_i32(&mut self, name: &str, value: i32) -> Result<(), Error> { + let name = try!(CString::new(name)); + unsafe { + try_call!(raw::git_config_set_int32(self.raw, name, value)); + } + Ok(()) + } + + /// Set the value of an integer config variable in the config file with the + /// highest level (usually the local one). + pub fn set_i64(&mut self, name: &str, value: i64) -> Result<(), Error> { + let name = try!(CString::new(name)); + unsafe { + try_call!(raw::git_config_set_int64(self.raw, name, value)); + } + Ok(()) + } + + /// Set the value of an multivar config variable in the config file with the + /// highest level (usually the local one). + pub fn set_multivar(&mut self, name: &str, regexp: &str, value: &str) -> Result<(), Error> { + let name = try!(CString::new(name)); + let regexp = try!(CString::new(regexp)); + let value = try!(CString::new(value)); + unsafe { + try_call!(raw::git_config_set_multivar(self.raw, name, regexp, value)); + } + Ok(()) + } + + /// Set the value of a string config variable in the config file with the + /// highest level (usually the local one). + pub fn set_str(&mut self, name: &str, value: &str) -> Result<(), Error> { + let name = try!(CString::new(name)); + let value = try!(CString::new(value)); + unsafe { + try_call!(raw::git_config_set_string(self.raw, name, value)); + } + Ok(()) + } + + /// Create a snapshot of the configuration + /// + /// Create a snapshot of the current state of a configuration, which allows + /// you to look into a consistent view of the configuration for looking up + /// complex values (e.g. a remote, submodule). 
+ pub fn snapshot(&mut self) -> Result { + let mut ret = 0 as *mut raw::git_config; + unsafe { + try_call!(raw::git_config_snapshot(&mut ret, self.raw)); + Ok(Binding::from_raw(ret)) + } + } + + /// Parse a string as a bool. + /// Interprets "true", "yes", "on", 1, or any non-zero number as true. + /// Interprets "false", "no", "off", 0, or an empty string as false. + pub fn parse_bool(s: S) -> Result { + let s = try!(s.into_c_string()); + let mut out = 0; + ::init(); + unsafe { + try_call!(raw::git_config_parse_bool(&mut out, s)); + } + Ok(out != 0) + } + + /// Parse a string as an i32; handles suffixes like k, M, or G, and + /// multiplies by the appropriate power of 1024. + pub fn parse_i32(s: S) -> Result { + let s = try!(s.into_c_string()); + let mut out = 0; + ::init(); + unsafe { + try_call!(raw::git_config_parse_int32(&mut out, s)); + } + Ok(out) + } + + /// Parse a string as an i64; handles suffixes like k, M, or G, and + /// multiplies by the appropriate power of 1024. + pub fn parse_i64(s: S) -> Result { + let s = try!(s.into_c_string()); + let mut out = 0; + ::init(); + unsafe { + try_call!(raw::git_config_parse_int64(&mut out, s)); + } + Ok(out) + } +} + +impl Binding for Config { + type Raw = *mut raw::git_config; + unsafe fn from_raw(raw: *mut raw::git_config) -> Config { + Config { raw: raw } + } + fn raw(&self) -> *mut raw::git_config { self.raw } +} + +impl Drop for Config { + fn drop(&mut self) { + unsafe { raw::git_config_free(self.raw) } + } +} + +impl<'cfg> ConfigEntry<'cfg> { + /// Gets the name of this entry. + /// + /// May return `None` if the name is not valid utf-8 + pub fn name(&self) -> Option<&str> { str::from_utf8(self.name_bytes()).ok() } + + /// Gets the name of this entry as a byte slice. + pub fn name_bytes(&self) -> &[u8] { + unsafe { ::opt_bytes(self, (*self.raw).name).unwrap() } + } + + /// Gets the value of this entry. 
+ /// + /// May return `None` if the value is not valid utf-8 + pub fn value(&self) -> Option<&str> { str::from_utf8(self.value_bytes()).ok() } + + /// Gets the value of this entry as a byte slice. + pub fn value_bytes(&self) -> &[u8] { + unsafe { ::opt_bytes(self, (*self.raw).value).unwrap() } + } + + /// Gets the configuration level of this entry. + pub fn level(&self) -> ConfigLevel { + unsafe { ConfigLevel::from_raw((*self.raw).level) } + } +} + +impl<'cfg> Binding for ConfigEntry<'cfg> { + type Raw = *mut raw::git_config_entry; + + unsafe fn from_raw(raw: *mut raw::git_config_entry) + -> ConfigEntry<'cfg> { + ConfigEntry { + raw: raw, + _marker: marker::PhantomData, + owned: true, + } + } + fn raw(&self) -> *mut raw::git_config_entry { self.raw } +} + +impl<'cfg> Binding for ConfigEntries<'cfg> { + type Raw = *mut raw::git_config_iterator; + + unsafe fn from_raw(raw: *mut raw::git_config_iterator) + -> ConfigEntries<'cfg> { + ConfigEntries { + raw: raw, + _marker: marker::PhantomData, + } + } + fn raw(&self) -> *mut raw::git_config_iterator { self.raw } +} + +// entries are only valid until the iterator is freed, so this impl is for +// `&'b T` instead of `T` to have a lifetime to tie them to. +// +// It's also not implemented for `&'b mut T` so we can have multiple entries +// (ok). 
+impl<'cfg, 'b> Iterator for &'b ConfigEntries<'cfg> { + type Item = Result, Error>; + fn next(&mut self) -> Option, Error>> { + let mut raw = 0 as *mut raw::git_config_entry; + unsafe { + try_call_iter!(raw::git_config_next(&mut raw, self.raw)); + Some(Ok(ConfigEntry { + owned: false, + raw: raw, + _marker: marker::PhantomData, + })) + } + } +} + +impl<'cfg> Drop for ConfigEntries<'cfg> { + fn drop(&mut self) { + unsafe { raw::git_config_iterator_free(self.raw) } + } +} + +impl<'cfg> Drop for ConfigEntry<'cfg> { + fn drop(&mut self) { + if self.owned { + unsafe { raw::git_config_entry_free(self.raw) } + } + } +} + +#[cfg(test)] +mod tests { + use std::fs::File; + use tempdir::TempDir; + + use Config; + + #[test] + fn smoke() { + let _cfg = Config::new().unwrap(); + let _ = Config::find_global(); + let _ = Config::find_system(); + let _ = Config::find_xdg(); + } + + #[test] + fn persisted() { + let td = TempDir::new("test").unwrap(); + let path = td.path().join("foo"); + File::create(&path).unwrap(); + + let mut cfg = Config::open(&path).unwrap(); + assert!(cfg.get_bool("foo.bar").is_err()); + cfg.set_bool("foo.k1", true).unwrap(); + cfg.set_i32("foo.k2", 1).unwrap(); + cfg.set_i64("foo.k3", 2).unwrap(); + cfg.set_str("foo.k4", "bar").unwrap(); + cfg.snapshot().unwrap(); + drop(cfg); + + let cfg = Config::open(&path).unwrap().snapshot().unwrap(); + assert_eq!(cfg.get_bool("foo.k1").unwrap(), true); + assert_eq!(cfg.get_i32("foo.k2").unwrap(), 1); + assert_eq!(cfg.get_i64("foo.k3").unwrap(), 2); + assert_eq!(cfg.get_str("foo.k4").unwrap(), "bar"); + + for entry in &cfg.entries(None).unwrap() { + let entry = entry.unwrap(); + entry.name(); + entry.value(); + entry.level(); + } + } + + #[test] + fn multivar() { + let td = TempDir::new("test").unwrap(); + let path = td.path().join("foo"); + File::create(&path).unwrap(); + + let mut cfg = Config::open(&path).unwrap(); + cfg.set_multivar("foo.bar", "^$", "baz").unwrap(); + cfg.set_multivar("foo.bar", "^$", 
"qux").unwrap(); + + let mut values: Vec = cfg.entries(None) + .unwrap() + .into_iter() + .map(|entry| entry.unwrap().value().unwrap().into()) + .collect(); + values.sort(); + assert_eq!(values, ["baz", "qux"]); + } + + #[test] + fn parse() { + assert_eq!(Config::parse_bool("").unwrap(), false); + assert_eq!(Config::parse_bool("false").unwrap(), false); + assert_eq!(Config::parse_bool("no").unwrap(), false); + assert_eq!(Config::parse_bool("off").unwrap(), false); + assert_eq!(Config::parse_bool("0").unwrap(), false); + + assert_eq!(Config::parse_bool("true").unwrap(), true); + assert_eq!(Config::parse_bool("yes").unwrap(), true); + assert_eq!(Config::parse_bool("on").unwrap(), true); + assert_eq!(Config::parse_bool("1").unwrap(), true); + assert_eq!(Config::parse_bool("42").unwrap(), true); + + assert!(Config::parse_bool(" ").is_err()); + assert!(Config::parse_bool("some-string").is_err()); + assert!(Config::parse_bool("-").is_err()); + + assert_eq!(Config::parse_i32("0").unwrap(), 0); + assert_eq!(Config::parse_i32("1").unwrap(), 1); + assert_eq!(Config::parse_i32("100").unwrap(), 100); + assert_eq!(Config::parse_i32("-1").unwrap(), -1); + assert_eq!(Config::parse_i32("-100").unwrap(), -100); + assert_eq!(Config::parse_i32("1k").unwrap(), 1024); + assert_eq!(Config::parse_i32("4k").unwrap(), 4096); + assert_eq!(Config::parse_i32("1M").unwrap(), 1048576); + assert_eq!(Config::parse_i32("1G").unwrap(), 1024*1024*1024); + + assert_eq!(Config::parse_i64("0").unwrap(), 0); + assert_eq!(Config::parse_i64("1").unwrap(), 1); + assert_eq!(Config::parse_i64("100").unwrap(), 100); + assert_eq!(Config::parse_i64("-1").unwrap(), -1); + assert_eq!(Config::parse_i64("-100").unwrap(), -100); + assert_eq!(Config::parse_i64("1k").unwrap(), 1024); + assert_eq!(Config::parse_i64("4k").unwrap(), 4096); + assert_eq!(Config::parse_i64("1M").unwrap(), 1048576); + assert_eq!(Config::parse_i64("1G").unwrap(), 1024*1024*1024); + assert_eq!(Config::parse_i64("100G").unwrap(), 
100*1024*1024*1024); + } +} diff -Nru cargo-0.17.0/vendor/git2-0.6.4/src/cred.rs cargo-0.19.0/vendor/git2-0.6.4/src/cred.rs --- cargo-0.17.0/vendor/git2-0.6.4/src/cred.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/git2-0.6.4/src/cred.rs 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,470 @@ +use std::ffi::CString; +use std::io::Write; +use std::mem; +use std::path::Path; +use std::process::{Command, Stdio}; +use url; + +use {raw, Error, Config, IntoCString}; +use util::Binding; + +/// A structure to represent git credentials in libgit2. +pub struct Cred { + raw: *mut raw::git_cred, +} + +/// Management of the gitcredentials(7) interface. +pub struct CredentialHelper { + /// A public field representing the currently discovered username from + /// configuration. + pub username: Option, + protocol: Option, + host: Option, + url: String, + commands: Vec, +} + +impl Cred { + /// Create a "default" credential usable for Negotiate mechanisms like NTLM + /// or Kerberos authentication. + pub fn default() -> Result { + ::init(); + let mut out = 0 as *mut raw::git_cred; + unsafe { + try_call!(raw::git_cred_default_new(&mut out)); + Ok(Binding::from_raw(out)) + } + } + + /// Create a new ssh key credential object used for querying an ssh-agent. + /// + /// The username specified is the username to authenticate. + pub fn ssh_key_from_agent(username: &str) -> Result { + ::init(); + let mut out = 0 as *mut raw::git_cred; + let username = try!(CString::new(username)); + unsafe { + try_call!(raw::git_cred_ssh_key_from_agent(&mut out, username)); + Ok(Binding::from_raw(out)) + } + } + + /// Create a new passphrase-protected ssh key credential object. 
+ pub fn ssh_key(username: &str, + publickey: Option<&Path>, + privatekey: &Path, + passphrase: Option<&str>) -> Result { + ::init(); + let username = try!(CString::new(username)); + let publickey = try!(::opt_cstr(publickey)); + let privatekey = try!(privatekey.into_c_string()); + let passphrase = try!(::opt_cstr(passphrase)); + let mut out = 0 as *mut raw::git_cred; + unsafe { + try_call!(raw::git_cred_ssh_key_new(&mut out, username, publickey, + privatekey, passphrase)); + Ok(Binding::from_raw(out)) + } + } + + /// Create a new plain-text username and password credential object. + pub fn userpass_plaintext(username: &str, + password: &str) -> Result { + ::init(); + let username = try!(CString::new(username)); + let password = try!(CString::new(password)); + let mut out = 0 as *mut raw::git_cred; + unsafe { + try_call!(raw::git_cred_userpass_plaintext_new(&mut out, username, + password)); + Ok(Binding::from_raw(out)) + } + } + + /// Attempt to read `credential.helper` according to gitcredentials(7) [1] + /// + /// This function will attempt to parse the user's `credential.helper` + /// configuration, invoke the necessary processes, and read off what the + /// username/password should be for a particular url. + /// + /// The returned credential type will be a username/password credential if + /// successful. + /// + /// [1]: https://www.kernel.org/pub/software/scm/git/docs/gitcredentials.html + pub fn credential_helper(config: &Config, + url: &str, + username: Option<&str>) + -> Result { + match CredentialHelper::new(url).config(config).username(username) + .execute() { + Some((username, password)) => { + Cred::userpass_plaintext(&username, &password) + } + None => Err(Error::from_str("failed to acquire username/password \ + from local configuration")) + } + } + + /// Create a credential to specify a username. + /// + /// THis is used with ssh authentication to query for the username if non is + /// specified in the url. 
+ pub fn username(username: &str) -> Result { + ::init(); + let username = try!(CString::new(username)); + let mut out = 0 as *mut raw::git_cred; + unsafe { + try_call!(raw::git_cred_username_new(&mut out, username)); + Ok(Binding::from_raw(out)) + } + } + + /// Check whether a credential object contains username information. + pub fn has_username(&self) -> bool { + unsafe { raw::git_cred_has_username(self.raw) == 1 } + } + + /// Return the type of credentials that this object represents. + pub fn credtype(&self) -> raw::git_credtype_t { + unsafe { (*self.raw).credtype } + } + + /// Unwrap access to the underlying raw pointer, canceling the destructor + pub unsafe fn unwrap(mut self) -> *mut raw::git_cred { + mem::replace(&mut self.raw, 0 as *mut raw::git_cred) + } +} + +impl Binding for Cred { + type Raw = *mut raw::git_cred; + + unsafe fn from_raw(raw: *mut raw::git_cred) -> Cred { + Cred { raw: raw } + } + fn raw(&self) -> *mut raw::git_cred { self.raw } +} + +impl Drop for Cred { + fn drop(&mut self) { + if !self.raw.is_null() { + unsafe { ((*self.raw).free)(self.raw) } + } + } +} + +impl CredentialHelper { + /// Create a new credential helper object which will be used to probe git's + /// local credential configuration. + /// + /// The url specified is the namespace on which this will query credentials. + /// Invalid urls are currently ignored. + pub fn new(url: &str) -> CredentialHelper { + let mut ret = CredentialHelper { + protocol: None, + host: None, + username: None, + url: url.to_string(), + commands: Vec::new(), + }; + + // Parse out the (protocol, host) if one is available + if let Ok(url) = url::Url::parse(url) { + if let Some(url::Host::Domain(s)) = url.host() { + ret.host = Some(s.to_string()); + } + ret.protocol = Some(url.scheme().to_string()) + } + return ret; + } + + /// Set the username that this credential helper will query with. + /// + /// By default the username is `None`. 
+ pub fn username(&mut self, username: Option<&str>) -> &mut CredentialHelper { + self.username = username.map(|s| s.to_string()); + self + } + + /// Query the specified configuration object to discover commands to + /// execute, usernames to query, etc. + pub fn config(&mut self, config: &Config) -> &mut CredentialHelper { + // Figure out the configured username/helper program. + // + // see http://git-scm.com/docs/gitcredentials.html#_configuration_options + // + // TODO: implement useHttpPath + if self.username.is_none() { + self.config_username(config); + } + self.config_helper(config); + self + } + + // Configure the queried username from `config` + fn config_username(&mut self, config: &Config) { + let key = self.exact_key("username"); + self.username = config.get_string(&key).ok().or_else(|| { + self.url_key("username").and_then(|s| { + config.get_string(&s).ok() + }) + }).or_else(|| { + config.get_string("credential.username").ok() + }) + } + + // Discover all `helper` directives from `config` + fn config_helper(&mut self, config: &Config) { + let exact = config.get_string(&self.exact_key("helper")); + self.add_command(exact.as_ref().ok().map(|s| &s[..])); + match self.url_key("helper") { + Some(key) => { + let url = config.get_string(&key); + self.add_command(url.as_ref().ok().map(|s| &s[..])); + } + None => {} + } + let global = config.get_string("credential.helper"); + self.add_command(global.as_ref().ok().map(|s| &s[..])); + } + + // Add a `helper` configured command to the list of commands to execute. 
+ // + // see https://www.kernel.org/pub/software/scm/git/docs/technical + // /api-credentials.html#_credential_helpers + fn add_command(&mut self, cmd: Option<&str>) { + let cmd = match cmd { + Some("") | None => return, + Some(s) => s, + }; + + if cmd.starts_with("!") { + self.commands.push(cmd[1..].to_string()); + } else if cmd.starts_with("/") || cmd.starts_with("\\") || + cmd[1..].starts_with(":\\") { + self.commands.push(format!("\"{}\"", cmd)); + } else { + self.commands.push(format!("git credential-{}", cmd)); + } + } + + fn exact_key(&self, name: &str) -> String { + format!("credential.{}.{}", self.url, name) + } + + fn url_key(&self, name: &str) -> Option { + match (&self.host, &self.protocol) { + (&Some(ref host), &Some(ref protocol)) => { + Some(format!("credential.{}://{}.{}", protocol, host, name)) + } + _ => None + } + } + + /// Execute this helper, attempting to discover a username/password pair. + /// + /// All I/O errors are ignored, (to match git behavior), and this function + /// only succeeds if both a username and a password were found + pub fn execute(&self) -> Option<(String, String)> { + let mut username = self.username.clone(); + let mut password = None; + for cmd in self.commands.iter() { + let (u, p) = self.execute_cmd(&cmd, &username); + if u.is_some() && username.is_none() { + username = u; + } + if p.is_some() && password.is_none() { + password = p; + } + if username.is_some() && password.is_some() { break } + } + + match (username, password) { + (Some(u), Some(p)) => Some((u, p)), + _ => None, + } + } + + // Execute the given `cmd`, providing the appropriate variables on stdin and + // then afterwards parsing the output into the username/password on stdout. + fn execute_cmd(&self, cmd: &str, username: &Option) + -> (Option, Option) { + macro_rules! my_try( ($e:expr) => ( + match $e { Ok(e) => e, Err(..) 
=> return (None, None) } + ) ); + + let mut p = my_try!(Command::new("sh").arg("-c") + .arg(&format!("{} get", cmd)) + .stdin(Stdio::piped()) + .stdout(Stdio::piped()) + .stderr(Stdio::piped()) + .spawn()); + // Ignore write errors as the command may not actually be listening for + // stdin + { + let stdin = p.stdin.as_mut().unwrap(); + match self.protocol { + Some(ref p) => { let _ = writeln!(stdin, "protocol={}", p); } + None => {} + } + match self.host { + Some(ref p) => { let _ = writeln!(stdin, "host={}", p); } + None => {} + } + match *username { + Some(ref p) => { let _ = writeln!(stdin, "username={}", p); } + None => {} + } + } + let output = my_try!(p.wait_with_output()); + if !output.status.success() { return (None, None) } + return self.parse_output(output.stdout) + } + + // Parse the output of a command into the username/password found + fn parse_output(&self, output: Vec) -> (Option, Option) { + // Parse the output of the command, looking for username/password + let mut username = None; + let mut password = None; + for line in output.split(|t| *t == b'\n') { + let mut parts = line.splitn(2, |t| *t == b'='); + let key = parts.next().unwrap(); + let value = match parts.next() { Some(s) => s, None => continue }; + let value = match String::from_utf8(value.to_vec()) { + Ok(s) => s, + Err(..) => continue, + }; + match key { + b"username" => username = Some(value), + b"password" => password = Some(value), + _ => {} + } + } + (username, password) + } +} + +#[cfg(all(test, feature = "unstable"))] +mod test { + use std::env; + use std::fs::File; + use std::io::prelude::*; + use std::path::Path; + use tempdir::TempDir; + + use {Cred, Config, CredentialHelper, ConfigLevel}; + + macro_rules! 
cfg( ($($k:expr => $v:expr),*) => ({ + let td = TempDir::new("git2-rs").unwrap(); + let mut cfg = Config::new().unwrap(); + cfg.add_file(&td.path().join("cfg"), ConfigLevel::Highest, false).unwrap(); + $(cfg.set_str($k, $v).unwrap();)* + cfg + }) ); + + #[test] + fn smoke() { + Cred::default().unwrap(); + } + + #[test] + fn credential_helper1() { + let cfg = cfg! { + "credential.helper" => "!f() { echo username=a; echo password=b; }; f" + }; + let (u, p) = CredentialHelper::new("https://example.com/foo/bar") + .config(&cfg) + .execute().unwrap(); + assert_eq!(u, "a"); + assert_eq!(p, "b"); + } + + #[test] + fn credential_helper2() { + let cfg = cfg! {}; + assert!(CredentialHelper::new("https://example.com/foo/bar") + .config(&cfg) + .execute().is_none()); + } + + #[test] + fn credential_helper3() { + let cfg = cfg! { + "credential.https://example.com.helper" => + "!f() { echo username=c; }; f", + "credential.helper" => "!f() { echo username=a; echo password=b; }; f" + }; + let (u, p) = CredentialHelper::new("https://example.com/foo/bar") + .config(&cfg) + .execute().unwrap(); + assert_eq!(u, "c"); + assert_eq!(p, "b"); + } + + #[test] + fn credential_helper4() { + let td = TempDir::new("git2-rs").unwrap(); + let path = td.path().join("script"); + File::create(&path).unwrap().write(br"\ +#!/bin/sh +echo username=c +").unwrap(); + chmod(&path); + let cfg = cfg! 
{ + "credential.https://example.com.helper" => + &path.display().to_string()[..], + "credential.helper" => "!f() { echo username=a; echo password=b; }; f" + }; + let (u, p) = CredentialHelper::new("https://example.com/foo/bar") + .config(&cfg) + .execute().unwrap(); + assert_eq!(u, "c"); + assert_eq!(p, "b"); + } + + #[test] + fn credential_helper5() { + let td = TempDir::new("git2-rs").unwrap(); + let path = td.path().join("git-credential-script"); + File::create(&path).unwrap().write(br"\ +#!/bin/sh +echo username=c +").unwrap(); + chmod(&path); + + let paths = env::var("PATH").unwrap(); + let paths = env::split_paths(&paths) + .chain(path.parent().map(|p| p.to_path_buf()).into_iter()); + env::set_var("PATH", &env::join_paths(paths).unwrap()); + + let cfg = cfg! { + "credential.https://example.com.helper" => "script", + "credential.helper" => "!f() { echo username=a; echo password=b; }; f" + }; + let (u, p) = CredentialHelper::new("https://example.com/foo/bar") + .config(&cfg) + .execute().unwrap(); + assert_eq!(u, "c"); + assert_eq!(p, "b"); + } + + #[test] + fn credential_helper6() { + let cfg = cfg! 
{ + "credential.helper" => "" + }; + assert!(CredentialHelper::new("https://example.com/foo/bar") + .config(&cfg) + .execute().is_none()); + } + + #[cfg(unix)] + fn chmod(path: &Path) { + use std::os::unix::prelude::*; + use std::fs; + let mut perms = fs::metadata(path).unwrap().permissions(); + perms.set_mode(0o755); + fs::set_permissions(path, perms).unwrap(); + } + #[cfg(windows)] + fn chmod(_path: &Path) {} +} diff -Nru cargo-0.17.0/vendor/git2-0.6.4/src/describe.rs cargo-0.19.0/vendor/git2-0.6.4/src/describe.rs --- cargo-0.17.0/vendor/git2-0.6.4/src/describe.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/git2-0.6.4/src/describe.rs 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,186 @@ +use std::marker; +use std::mem; +use std::ffi::CString; + +use libc::{c_uint, c_int}; + +use {raw, Repository, Error, Buf}; +use util::Binding; + +/// The result of a `describe` operation on either an `Describe` or a +/// `Repository`. +pub struct Describe<'repo> { + raw: *mut raw::git_describe_result, + _marker: marker::PhantomData<&'repo Repository>, +} + +/// Options which indicate how a `Describe` is created. +pub struct DescribeOptions { + raw: raw::git_describe_options, + pattern: CString, +} + +/// Options which can be used to customize how a description is formatted. +pub struct DescribeFormatOptions { + raw: raw::git_describe_format_options, + dirty_suffix: CString, +} + +impl<'repo> Describe<'repo> { + /// Prints this describe result, returning the result as a string. 
+ pub fn format(&self, opts: Option<&DescribeFormatOptions>) + -> Result { + let buf = Buf::new(); + let opts = opts.map(|o| &o.raw as *const _).unwrap_or(0 as *const _); + unsafe { + try_call!(raw::git_describe_format(buf.raw(), self.raw, opts)); + } + Ok(String::from_utf8(buf.to_vec()).unwrap()) + } +} + +impl<'repo> Binding for Describe<'repo> { + type Raw = *mut raw::git_describe_result; + + unsafe fn from_raw(raw: *mut raw::git_describe_result) -> Describe<'repo> { + Describe { raw: raw, _marker: marker::PhantomData, } + } + fn raw(&self) -> *mut raw::git_describe_result { self.raw } +} + +impl<'repo> Drop for Describe<'repo> { + fn drop(&mut self) { + unsafe { raw::git_describe_result_free(self.raw) } + } +} + +impl DescribeFormatOptions { + /// Creates a new blank set of formatting options for a description. + pub fn new() -> DescribeFormatOptions { + let mut opts = DescribeFormatOptions { + raw: unsafe { mem::zeroed() }, + dirty_suffix: CString::new(Vec::new()).unwrap(), + }; + opts.raw.version = 1; + opts.raw.abbreviated_size = 7; + return opts + } + + /// Sets the size of the abbreviated commit id to use. + /// + /// The value is the lower bound for the length of the abbreviated string, + /// and the default is 7. + pub fn abbreviated_size(&mut self, size: u32) -> &mut Self { + self.raw.abbreviated_size = size as c_uint; + self + } + + /// Sets whether or not the long format is used even when a shorter name + /// could be used. + pub fn always_use_long_format(&mut self, long: bool) -> &mut Self { + self.raw.always_use_long_format = long as c_int; + self + } + + /// If the workdir is dirty and this is set, this string will be appended to + /// the description string. + pub fn dirty_suffix(&mut self, suffix: &str) -> &mut Self { + self.dirty_suffix = CString::new(suffix).unwrap(); + self.raw.dirty_suffix = self.dirty_suffix.as_ptr(); + self + } +} + +impl DescribeOptions { + /// Creates a new blank set of formatting options for a description. 
+ pub fn new() -> DescribeOptions { + let mut opts = DescribeOptions { + raw: unsafe { mem::zeroed() }, + pattern: CString::new(Vec::new()).unwrap(), + }; + opts.raw.version = 1; + opts.raw.max_candidates_tags = 10; + return opts + } + + #[allow(missing_docs)] + pub fn max_candidates_tags(&mut self, max: u32) -> &mut Self { + self.raw.max_candidates_tags = max as c_uint; + self + } + + /// Sets the reference lookup strategy + /// + /// This behaves like the `--tags` option to git-decribe. + pub fn describe_tags(&mut self) -> &mut Self { + self.raw.describe_strategy = raw::GIT_DESCRIBE_TAGS as c_uint; + self + } + + /// Sets the reference lookup strategy + /// + /// This behaves like the `--all` option to git-decribe. + pub fn describe_all(&mut self) -> &mut Self { + self.raw.describe_strategy = raw::GIT_DESCRIBE_ALL as c_uint; + self + } + + /// Indicates when calculating the distance from the matching tag or + /// reference whether to only walk down the first-parent ancestry. + pub fn only_follow_first_parent(&mut self, follow: bool) -> &mut Self { + self.raw.only_follow_first_parent = follow as c_int; + self + } + + /// If no matching tag or reference is found whether a describe option would + /// normally fail. This option indicates, however, that it will instead fall + /// back to showing the full id of the commit. 
+ pub fn show_commit_oid_as_fallback(&mut self, show: bool) -> &mut Self { + self.raw.show_commit_oid_as_fallback = show as c_int; + self + } + + #[allow(missing_docs)] + pub fn pattern(&mut self, pattern: &str) -> &mut Self { + self.pattern = CString::new(pattern).unwrap(); + self.raw.pattern = self.pattern.as_ptr(); + self + } +} + +impl Binding for DescribeOptions { + type Raw = *mut raw::git_describe_options; + + unsafe fn from_raw(_raw: *mut raw::git_describe_options) + -> DescribeOptions { + panic!("unimplemened") + } + fn raw(&self) -> *mut raw::git_describe_options { + &self.raw as *const _ as *mut _ + } +} + +#[cfg(test)] +mod tests { + use DescribeOptions; + + #[test] + fn smoke() { + let (_td, repo) = ::test::repo_init(); + let head = t!(repo.head()).target().unwrap(); + + let d = t!(repo.describe(DescribeOptions::new() + .show_commit_oid_as_fallback(true))); + let id = head.to_string(); + assert_eq!(t!(d.format(None)), &id[..7]); + + let obj = t!(repo.find_object(head, None)); + let sig = t!(repo.signature()); + t!(repo.tag("foo", &obj, &sig, "message", true)); + let d = t!(repo.describe(&DescribeOptions::new())); + assert_eq!(t!(d.format(None)), "foo"); + + let d = t!(obj.describe(&DescribeOptions::new())); + assert_eq!(t!(d.format(None)), "foo"); + } +} diff -Nru cargo-0.17.0/vendor/git2-0.6.4/src/diff.rs cargo-0.19.0/vendor/git2-0.6.4/src/diff.rs --- cargo-0.17.0/vendor/git2-0.6.4/src/diff.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/git2-0.6.4/src/diff.rs 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,1237 @@ +use std::ffi::CString; +use std::marker; +use std::mem; +use std::ops::Range; +use std::path::Path; +use std::slice; +use libc::{c_char, size_t, c_void, c_int}; + +use {raw, panic, Buf, Delta, Oid, Repository, Error, DiffFormat}; +use {DiffStatsFormat, IntoCString}; +use util::{self, Binding}; + +/// The diff object that contains all individual file deltas. 
+/// +/// This is an opaque structure which will be allocated by one of the diff +/// generator functions on the `Repository` structure (e.g. `diff_tree_to_tree` +/// or other `diff_*` functions). +pub struct Diff<'repo> { + raw: *mut raw::git_diff, + _marker: marker::PhantomData<&'repo Repository>, +} + +unsafe impl<'repo> Send for Diff<'repo> {} + +/// Description of changes to one entry. +pub struct DiffDelta<'a> { + raw: *mut raw::git_diff_delta, + _marker: marker::PhantomData<&'a raw::git_diff_delta>, +} + +/// Description of one side of a delta. +/// +/// Although this is called a "file" it could represent a file, a symbolic +/// link, a submodule commit id, or even a tree (although that only happens if +/// you are tracking type changes or ignored/untracked directories). +pub struct DiffFile<'a> { + raw: *const raw::git_diff_file, + _marker: marker::PhantomData<&'a raw::git_diff_file>, +} + +/// Structure describing options about how the diff should be executed. +pub struct DiffOptions { + pathspec: Vec, + pathspec_ptrs: Vec<*const c_char>, + old_prefix: Option, + new_prefix: Option, + raw: raw::git_diff_options, +} + +/// Control behavior of rename and copy detection +pub struct DiffFindOptions { + raw: raw::git_diff_find_options, +} + +/// An iterator over the diffs in a delta +pub struct Deltas<'diff> { + range: Range, + diff: &'diff Diff<'diff>, +} + +/// Structure describing a line (or data span) of a diff. +pub struct DiffLine<'a> { + raw: *const raw::git_diff_line, + _marker: marker::PhantomData<&'a raw::git_diff_line>, +} + +/// Structure describing a hunk of a diff. +pub struct DiffHunk<'a> { + raw: *const raw::git_diff_hunk, + _marker: marker::PhantomData<&'a raw::git_diff_hunk>, +} + +/// Structure describing a hunk of a diff. +pub struct DiffStats { + raw: *mut raw::git_diff_stats, +} + +/// Structure describing the binary contents of a diff. 
+pub struct DiffBinary<'a> { + raw: *const raw::git_diff_binary, + _marker: marker::PhantomData<&'a raw::git_diff_binary>, +} + +/// The contents of one of the files in a binary diff. +pub struct DiffBinaryFile<'a> { + raw: *const raw::git_diff_binary_file, + _marker: marker::PhantomData<&'a raw::git_diff_binary_file>, +} + +/// When producing a binary diff, the binary data returned will be +/// either the deflated full ("literal") contents of the file, or +/// the deflated binary delta between the two sides (whichever is +/// smaller). +#[derive(Copy, Clone, Debug)] +pub enum DiffBinaryKind { + /// There is no binary delta + None, + /// The binary data is the literal contents of the file + Literal, + /// The binary data is the delta from one side to the other + Delta, +} + +type PrintCb<'a> = FnMut(DiffDelta, Option, DiffLine) -> bool + 'a; + +pub type FileCb<'a> = FnMut(DiffDelta, f32) -> bool + 'a; +pub type BinaryCb<'a> = FnMut(DiffDelta, DiffBinary) -> bool + 'a; +pub type HunkCb<'a> = FnMut(DiffDelta, DiffHunk) -> bool + 'a; +pub type LineCb<'a> = FnMut(DiffDelta, Option, DiffLine) -> bool + 'a; + +struct ForeachCallbacks<'a, 'b: 'a, 'c, 'd: 'c, 'e, 'f: 'e, 'g, 'h: 'g> { + file: &'a mut FileCb<'b>, + binary: Option<&'c mut BinaryCb<'d>>, + hunk: Option<&'e mut HunkCb<'f>>, + line: Option<&'g mut LineCb<'h>>, +} + +impl<'repo> Diff<'repo> { + /// Merge one diff into another. + /// + /// This merges items from the "from" list into the "self" list. The + /// resulting diff will have all items that appear in either list. + /// If an item appears in both lists, then it will be "merged" to appear + /// as if the old version was from the "onto" list and the new version + /// is from the "from" list (with the exception that if the item has a + /// pending DELETE in the middle, then it will show as deleted). 
+ pub fn merge(&mut self, from: &Diff<'repo>) -> Result<(), Error> { + unsafe { try_call!(raw::git_diff_merge(self.raw, &*from.raw)); } + Ok(()) + } + + /// Returns an iterator over the deltas in this diff. + pub fn deltas(&self) -> Deltas { + let num_deltas = unsafe { raw::git_diff_num_deltas(&*self.raw) }; + Deltas { range: 0..(num_deltas as usize), diff: self } + } + + /// Return the diff delta for an entry in the diff list. + pub fn get_delta(&self, i: usize) -> Option { + unsafe { + let ptr = raw::git_diff_get_delta(&*self.raw, i as size_t); + Binding::from_raw_opt(ptr as *mut _) + } + } + + /// Check if deltas are sorted case sensitively or insensitively. + pub fn is_sorted_icase(&self) -> bool { + unsafe { raw::git_diff_is_sorted_icase(&*self.raw) == 1 } + } + + /// Iterate over a diff generating formatted text output. + /// + /// Returning `false` from the callback will terminate the iteration and + /// return an error from this function. + pub fn print(&self, format: DiffFormat, mut cb: F) -> Result<(), Error> + where F: FnMut(DiffDelta, + Option, + DiffLine) -> bool { + let mut cb: &mut PrintCb = &mut cb; + let ptr = &mut cb as *mut _; + unsafe { + try_call!(raw::git_diff_print(self.raw, format, print_cb, + ptr as *mut _)); + return Ok(()) + } + } + + /// Loop over all deltas in a diff issuing callbacks. + /// + /// Returning `false` from any callback will terminate the iteration and + /// return an error from this function. 
+ pub fn foreach(&self, + file_cb: &mut FileCb, + binary_cb: Option<&mut BinaryCb>, + hunk_cb: Option<&mut HunkCb>, + line_cb: Option<&mut LineCb>) -> Result<(), Error> { + let mut cbs = ForeachCallbacks { + file: file_cb, + binary: binary_cb, + hunk: hunk_cb, + line: line_cb, + }; + let ptr = &mut cbs as *mut _; + unsafe { + let binary_cb_c = if cbs.binary.is_some() { + Some(binary_cb_c as raw::git_diff_binary_cb) + } else { + None + }; + let hunk_cb_c = if cbs.hunk.is_some() { + Some(hunk_cb_c as raw::git_diff_hunk_cb) + } else { + None + }; + let line_cb_c = if cbs.line.is_some() { + Some(line_cb_c as raw::git_diff_line_cb) + } else { + None + }; + try_call!(raw::git_diff_foreach(self.raw, file_cb_c, binary_cb_c, + hunk_cb_c, line_cb_c, + ptr as *mut _)); + return Ok(()) + } + } + + /// Accumulate diff statistics for all patches. + pub fn stats(&self) -> Result { + let mut ret = 0 as *mut raw::git_diff_stats; + unsafe { + try_call!(raw::git_diff_get_stats(&mut ret, self.raw)); + Ok(Binding::from_raw(ret)) + } + } + + /// Transform a diff marking file renames, copies, etc. + /// + /// This modifies a diff in place, replacing old entries that look like + /// renames or copies with new entries reflecting those changes. This also + /// will, if requested, break modified files into add/remove pairs if the + /// amount of change is above a threshold. 
+ pub fn find_similar(&mut self, opts: Option<&mut DiffFindOptions>) + -> Result<(), Error> { + let opts = opts.map(|opts| &opts.raw); + unsafe { try_call!(raw::git_diff_find_similar(self.raw, opts)); } + Ok(()) + } + + // TODO: num_deltas_of_type, format_email, find_similar +} + +pub extern fn print_cb(delta: *const raw::git_diff_delta, + hunk: *const raw::git_diff_hunk, + line: *const raw::git_diff_line, + data: *mut c_void) -> c_int { + unsafe { + let delta = Binding::from_raw(delta as *mut _); + let hunk = Binding::from_raw_opt(hunk); + let line = Binding::from_raw(line); + + let r = panic::wrap(|| { + let data = data as *mut &mut PrintCb; + (*data)(delta, hunk, line) + }); + if r == Some(true) {0} else {-1} + } +} + +extern fn file_cb_c(delta: *const raw::git_diff_delta, + progress: f32, + data: *mut c_void) -> c_int { + unsafe { + let delta = Binding::from_raw(delta as *mut _); + + let r = panic::wrap(|| { + let cbs = data as *mut ForeachCallbacks; + ((*cbs).file)(delta, progress) + }); + if r == Some(true) {0} else {-1} + } +} + +extern fn binary_cb_c(delta: *const raw::git_diff_delta, + binary: *const raw::git_diff_binary, + data: *mut c_void) -> c_int { + unsafe { + let delta = Binding::from_raw(delta as *mut _); + let binary = Binding::from_raw(binary); + + let r = panic::wrap(|| { + let cbs = data as *mut ForeachCallbacks; + match (*cbs).binary { + Some(ref mut cb) => cb(delta, binary), + None => false, + } + }); + if r == Some(true) {0} else {-1} + } +} + +extern fn hunk_cb_c(delta: *const raw::git_diff_delta, + hunk: *const raw::git_diff_hunk, + data: *mut c_void) -> c_int { + unsafe { + let delta = Binding::from_raw(delta as *mut _); + let hunk = Binding::from_raw(hunk); + + let r = panic::wrap(|| { + let cbs = data as *mut ForeachCallbacks; + match (*cbs).hunk { + Some(ref mut cb) => cb(delta, hunk), + None => false, + } + }); + if r == Some(true) {0} else {-1} + } +} + +extern fn line_cb_c(delta: *const raw::git_diff_delta, + hunk: *const 
raw::git_diff_hunk, + line: *const raw::git_diff_line, + data: *mut c_void) -> c_int { + unsafe { + let delta = Binding::from_raw(delta as *mut _); + let hunk = Binding::from_raw_opt(hunk); + let line = Binding::from_raw(line); + + let r = panic::wrap(|| { + let cbs = data as *mut ForeachCallbacks; + match (*cbs).line { + Some(ref mut cb) => cb(delta, hunk, line), + None => false, + } + }); + if r == Some(true) {0} else {-1} + } +} + + +impl<'repo> Binding for Diff<'repo> { + type Raw = *mut raw::git_diff; + unsafe fn from_raw(raw: *mut raw::git_diff) -> Diff<'repo> { + Diff { + raw: raw, + _marker: marker::PhantomData, + } + } + fn raw(&self) -> *mut raw::git_diff { self.raw } +} + +impl<'repo> Drop for Diff<'repo> { + fn drop(&mut self) { + unsafe { raw::git_diff_free(self.raw) } + } +} + +impl<'a> DiffDelta<'a> { + // TODO: expose when diffs are more exposed + // pub fn similarity(&self) -> u16 { + // unsafe { (*self.raw).similarity } + // } + + /// Returns the number of files in this delta. + pub fn nfiles(&self) -> u16 { + unsafe { (*self.raw).nfiles } + } + + /// Returns the status of this entry + /// + /// For more information, see `Delta`'s documentation + pub fn status(&self) -> Delta { + match unsafe { (*self.raw).status } { + raw::GIT_DELTA_UNMODIFIED => Delta::Unmodified, + raw::GIT_DELTA_ADDED => Delta::Added, + raw::GIT_DELTA_DELETED => Delta::Deleted, + raw::GIT_DELTA_MODIFIED => Delta::Modified, + raw::GIT_DELTA_RENAMED => Delta::Renamed, + raw::GIT_DELTA_COPIED => Delta::Copied, + raw::GIT_DELTA_IGNORED => Delta::Ignored, + raw::GIT_DELTA_UNTRACKED => Delta::Untracked, + raw::GIT_DELTA_TYPECHANGE => Delta::Typechange, + raw::GIT_DELTA_UNREADABLE => Delta::Unreadable, + raw::GIT_DELTA_CONFLICTED => Delta::Conflicted, + n => panic!("unknown diff status: {}", n), + } + } + + /// Return the file which represents the "from" side of the diff. 
+ /// + /// What side this means depends on the function that was used to generate + /// the diff and will be documented on the function itself. + pub fn old_file(&self) -> DiffFile<'a> { + unsafe { Binding::from_raw(&(*self.raw).old_file as *const _) } + } + + /// Return the file which represents the "to" side of the diff. + /// + /// What side this means depends on the function that was used to generate + /// the diff and will be documented on the function itself. + pub fn new_file(&self) -> DiffFile<'a> { + unsafe { Binding::from_raw(&(*self.raw).new_file as *const _) } + } +} + +impl<'a> Binding for DiffDelta<'a> { + type Raw = *mut raw::git_diff_delta; + unsafe fn from_raw(raw: *mut raw::git_diff_delta) -> DiffDelta<'a> { + DiffDelta { + raw: raw, + _marker: marker::PhantomData, + } + } + fn raw(&self) -> *mut raw::git_diff_delta { self.raw } +} + +impl<'a> DiffFile<'a> { + /// Returns the Oid of this item. + /// + /// If this entry represents an absent side of a diff (e.g. the `old_file` + /// of a `Added` delta), then the oid returned will be zeroes. + pub fn id(&self) -> Oid { + unsafe { Binding::from_raw(&(*self.raw).id as *const _) } + } + + /// Returns the path, in bytes, of the entry relative to the working + /// directory of the repository. + pub fn path_bytes(&self) -> Option<&'a [u8]> { + static FOO: () = (); + unsafe { ::opt_bytes(&FOO, (*self.raw).path) } + } + + /// Returns the path of the entry relative to the working directory of the + /// repository. 
+ pub fn path(&self) -> Option<&'a Path> { + self.path_bytes().map(util::bytes2path) + } + + /// Returns the size of this entry, in bytes + pub fn size(&self) -> u64 { unsafe { (*self.raw).size as u64 } } + + // TODO: expose flags/mode +} + +impl<'a> Binding for DiffFile<'a> { + type Raw = *const raw::git_diff_file; + unsafe fn from_raw(raw: *const raw::git_diff_file) -> DiffFile<'a> { + DiffFile { + raw: raw, + _marker: marker::PhantomData, + } + } + fn raw(&self) -> *const raw::git_diff_file { self.raw } +} + +impl DiffOptions { + /// Creates a new set of empty diff options. + /// + /// All flags and other options are defaulted to false or their otherwise + /// zero equivalents. + pub fn new() -> DiffOptions { + let mut opts = DiffOptions { + pathspec: Vec::new(), + pathspec_ptrs: Vec::new(), + raw: unsafe { mem::zeroed() }, + old_prefix: None, + new_prefix: None, + }; + assert_eq!(unsafe { + raw::git_diff_init_options(&mut opts.raw, 1) + }, 0); + opts + } + + fn flag(&mut self, opt: u32, val: bool) -> &mut DiffOptions { + if val { + self.raw.flags |= opt; + } else { + self.raw.flags &= !opt; + } + self + } + + /// Flag indicating whether the sides of the diff will be reversed. + pub fn reverse(&mut self, reverse: bool) -> &mut DiffOptions { + self.flag(raw::GIT_DIFF_REVERSE, reverse) + } + + /// Flag indicating whether ignored files are included. + pub fn include_ignored(&mut self, include: bool) -> &mut DiffOptions { + self.flag(raw::GIT_DIFF_INCLUDE_IGNORED, include) + } + + /// Flag indicating whether ignored directories are traversed deeply or not. 
+ pub fn recurse_ignored_dirs(&mut self, recurse: bool) -> &mut DiffOptions { + self.flag(raw::GIT_DIFF_RECURSE_IGNORED_DIRS, recurse) + } + + /// Flag indicating whether untracked files are in the diff + pub fn include_untracked(&mut self, include: bool) -> &mut DiffOptions { + self.flag(raw::GIT_DIFF_INCLUDE_UNTRACKED, include) + } + + /// Flag indicating whether untracked directories are deeply traversed or + /// not. + pub fn recurse_untracked_dirs(&mut self, recurse: bool) -> &mut DiffOptions { + self.flag(raw::GIT_DIFF_RECURSE_UNTRACKED_DIRS, recurse) + } + + /// Flag indicating whether unmodified files are in the diff. + pub fn include_unmodified(&mut self, include: bool) -> &mut DiffOptions { + self.flag(raw::GIT_DIFF_INCLUDE_UNMODIFIED, include) + } + + /// If entrabled, then Typechange delta records are generated. + pub fn include_typechange(&mut self, include: bool) -> &mut DiffOptions { + self.flag(raw::GIT_DIFF_INCLUDE_TYPECHANGE, include) + } + + /// Event with `include_typechange`, the tree treturned generally shows a + /// deleted blow. This flag correctly labels the tree transitions as a + /// typechange record with the `new_file`'s mode set to tree. + /// + /// Note that the tree SHA will not be available. + pub fn include_typechange_trees(&mut self, include: bool) -> &mut DiffOptions { + self.flag(raw::GIT_DIFF_INCLUDE_TYPECHANGE_TREES, include) + } + + /// Flag indicating whether file mode changes are ignored. + pub fn ignore_filemode(&mut self, ignore: bool) -> &mut DiffOptions { + self.flag(raw::GIT_DIFF_IGNORE_FILEMODE, ignore) + } + + /// Flag indicating whether all submodules should be treated as unmodified. + pub fn ignore_submodules(&mut self, ignore: bool) -> &mut DiffOptions { + self.flag(raw::GIT_DIFF_IGNORE_SUBMODULES, ignore) + } + + /// Flag indicating whether case insensitive filenames should be used. 
+ pub fn ignore_case(&mut self, ignore: bool) -> &mut DiffOptions { + self.flag(raw::GIT_DIFF_IGNORE_CASE, ignore) + } + + /// If pathspecs are specified, this flag means that they should be applied + /// as an exact match instead of a fnmatch pattern. + pub fn disable_pathspec_match(&mut self, disable: bool) -> &mut DiffOptions { + self.flag(raw::GIT_DIFF_DISABLE_PATHSPEC_MATCH, disable) + } + + /// Disable updating the `binary` flag in delta records. This is useful when + /// iterating over a diff if you don't need hunk and data callbacks and want + /// to avoid having to load a file completely. + pub fn skip_binary_check(&mut self, skip: bool) -> &mut DiffOptions { + self.flag(raw::GIT_DIFF_SKIP_BINARY_CHECK, skip) + } + + /// When diff finds an untracked directory, to match the behavior of core + /// Git, it scans the contents for ignored and untracked files. If all + /// contents are ignored, then the directory is ignored; if any contents are + /// not ignored, then the directory is untracked. This is extra work that + /// may not matter in many cases. + /// + /// This flag turns off that scan and immediately labels an untracked + /// directory as untracked (changing the behavior to not match core git). + pub fn enable_fast_untracked_dirs(&mut self, enable: bool) + -> &mut DiffOptions { + self.flag(raw::GIT_DIFF_ENABLE_FAST_UNTRACKED_DIRS, enable) + } + + /// When diff finds a file in the working directory with stat information + /// different from the index, but the OID ends up being the same, write the + /// correct stat information into the index. Note: without this flag, diff + /// will always leave the index untouched. 
+ pub fn update_index(&mut self, update: bool) -> &mut DiffOptions { + self.flag(raw::GIT_DIFF_UPDATE_INDEX, update) + } + + /// Include unreadable files in the diff + pub fn include_unreadable(&mut self, include: bool) -> &mut DiffOptions { + self.flag(raw::GIT_DIFF_INCLUDE_UNREADABLE, include) + } + + /// Include unreadable files in the diff + pub fn include_unreadable_as_untracked(&mut self, include: bool) + -> &mut DiffOptions { + self.flag(raw::GIT_DIFF_INCLUDE_UNREADABLE_AS_UNTRACKED, include) + } + + /// Treat all files as text, disabling binary attributes and detection. + pub fn force_text(&mut self, force: bool) -> &mut DiffOptions { + self.flag(raw::GIT_DIFF_FORCE_TEXT, force) + } + + /// Treat all files as binary, disabling text diffs + pub fn force_binary(&mut self, force: bool) -> &mut DiffOptions { + self.flag(raw::GIT_DIFF_FORCE_TEXT, force) + } + + /// Ignore all whitespace + pub fn ignore_whitespace(&mut self, ignore: bool) -> &mut DiffOptions { + self.flag(raw::GIT_DIFF_IGNORE_WHITESPACE, ignore) + } + + /// Ignore changes in the amount of whitespace + pub fn ignore_whitespace_change(&mut self, ignore: bool) -> &mut DiffOptions { + self.flag(raw::GIT_DIFF_IGNORE_WHITESPACE_CHANGE, ignore) + } + + /// Ignore whitespace at tend of line + pub fn ignore_whitespace_eol(&mut self, ignore: bool) -> &mut DiffOptions { + self.flag(raw::GIT_DIFF_IGNORE_WHITESPACE_EOL, ignore) + } + + /// When generating patch text, include the content of untracked files. + /// + /// This automatically turns on `include_untracked` but it does not turn on + /// `recurse_untracked_dirs`. Add that flag if you want the content of every + /// single untracked file. + pub fn show_untracked_content(&mut self, show: bool) -> &mut DiffOptions { + self.flag(raw::GIT_DIFF_SHOW_UNTRACKED_CONTENT, show) + } + + /// When generating output, include the names of unmodified files if they + /// are included in the `Diff`. Normally these are skipped in the formats + /// that list files (e.g. 
name-only, name-status, raw). Even with this these + /// will not be included in the patch format. + pub fn show_unmodified(&mut self, show: bool) -> &mut DiffOptions { + self.flag(raw::GIT_DIFF_SHOW_UNMODIFIED, show) + } + + /// Use the "patience diff" algorithm + pub fn patience(&mut self, patience: bool) -> &mut DiffOptions { + self.flag(raw::GIT_DIFF_PATIENCE, patience) + } + + /// Take extra time to find the minimal diff + pub fn minimal(&mut self, minimal: bool) -> &mut DiffOptions { + self.flag(raw::GIT_DIFF_MINIMAL, minimal) + } + + /// Include the necessary deflate/delta information so that `git-apply` can + /// apply given diff information to binary files. + pub fn show_binary(&mut self, show: bool) -> &mut DiffOptions { + self.flag(raw::GIT_DIFF_SHOW_BINARY, show) + } + + /// Set the number of unchanged lines that define the boundary of a hunk + /// (and to display before and after). + /// + /// The default value for this is 3. + pub fn context_lines(&mut self, lines: u32) -> &mut DiffOptions { + self.raw.context_lines = lines; + self + } + + /// Set the maximum number of unchanged lines between hunk boundaries before + /// the hunks will be merged into one. + /// + /// The default value for this is 0. + pub fn interhunk_lines(&mut self, lines: u32) -> &mut DiffOptions { + self.raw.interhunk_lines = lines; + self + } + + /// The default value for this is `core.abbrev` or 7 if unset. + pub fn id_abbrev(&mut self, abbrev: u16) -> &mut DiffOptions { + self.raw.id_abbrev = abbrev; + self + } + + /// Maximum size (in bytes) above which a blob will be marked as binary + /// automatically. + /// + /// A negative value will disable this entirely. + /// + /// The default value for this is 512MB. + pub fn max_size(&mut self, size: i64) -> &mut DiffOptions { + self.raw.max_size = size as raw::git_off_t; + self + } + + /// The virtual "directory" to prefix old file names with in hunk headers. + /// + /// The default value for this is "a". 
+ pub fn old_prefix(&mut self, t: T) -> &mut DiffOptions { + self.old_prefix = Some(t.into_c_string().unwrap()); + self + } + + /// The virtual "directory" to prefix new file names with in hunk headers. + /// + /// The default value for this is "b". + pub fn new_prefix(&mut self, t: T) -> &mut DiffOptions { + self.new_prefix = Some(t.into_c_string().unwrap()); + self + } + + /// Add to the array of paths/fnmatch patterns to constrain the diff. + pub fn pathspec(&mut self, pathspec: T) + -> &mut DiffOptions { + let s = pathspec.into_c_string().unwrap(); + self.pathspec_ptrs.push(s.as_ptr()); + self.pathspec.push(s); + self + } + + /// Acquire a pointer to the underlying raw options. + /// + /// This function is unsafe as the pointer is only valid so long as this + /// structure is not moved, modified, or used elsewhere. + pub unsafe fn raw(&mut self) -> *const raw::git_diff_options { + self.raw.old_prefix = self.old_prefix.as_ref().map(|s| s.as_ptr()) + .unwrap_or(0 as *const _); + self.raw.new_prefix = self.new_prefix.as_ref().map(|s| s.as_ptr()) + .unwrap_or(0 as *const _); + self.raw.pathspec.count = self.pathspec_ptrs.len() as size_t; + self.raw.pathspec.strings = self.pathspec_ptrs.as_ptr() as *mut _; + &self.raw as *const _ + } + + // TODO: expose ignore_submodules, notify_cb/notify_payload +} + +impl<'diff> Iterator for Deltas<'diff> { + type Item = DiffDelta<'diff>; + fn next(&mut self) -> Option> { + self.range.next().and_then(|i| self.diff.get_delta(i)) + } + fn size_hint(&self) -> (usize, Option) { self.range.size_hint() } +} +impl<'diff> DoubleEndedIterator for Deltas<'diff> { + fn next_back(&mut self) -> Option> { + self.range.next_back().and_then(|i| self.diff.get_delta(i)) + } +} +impl<'diff> ExactSizeIterator for Deltas<'diff> {} + +impl<'a> DiffLine<'a> { + /// Line number in old file or `None` for added line + pub fn old_lineno(&self) -> Option { + match unsafe { (*self.raw).old_lineno } { + n if n < 0 => None, + n => Some(n as u32), + } + } + + 
/// Line number in new file or `None` for deleted line + pub fn new_lineno(&self) -> Option { + match unsafe { (*self.raw).new_lineno } { + n if n < 0 => None, + n => Some(n as u32), + } + } + + /// Number of newline characters in content + pub fn num_lines(&self) -> u32 { + unsafe { (*self.raw).num_lines as u32 } + } + + /// Offset in the original file to the content + pub fn content_offset(&self) -> i64 { + unsafe { (*self.raw).content_offset as i64 } + } + + /// Content of this line as bytes. + pub fn content(&self) -> &[u8] { + unsafe { + slice::from_raw_parts((*self.raw).content as *const u8, + (*self.raw).content_len as usize) + } + } + + /// Sigil showing the origin of this `DiffLine`. + /// + /// * ` ` - Line context + /// * `+` - Line addition + /// * `-` - Line deletion + /// * `=` - Context (End of file) + /// * `>` - Add (End of file) + /// * `<` - Remove (End of file) + /// * `F` - File header + /// * `H` - Hunk header + /// * `B` - Line binary + pub fn origin(&self) -> char { + match unsafe { (*self.raw).origin as raw::git_diff_line_t } { + raw::GIT_DIFF_LINE_CONTEXT => ' ', + raw::GIT_DIFF_LINE_ADDITION => '+', + raw::GIT_DIFF_LINE_DELETION => '-', + raw::GIT_DIFF_LINE_CONTEXT_EOFNL => '=', + raw::GIT_DIFF_LINE_ADD_EOFNL => '>', + raw::GIT_DIFF_LINE_DEL_EOFNL => '<', + raw::GIT_DIFF_LINE_FILE_HDR => 'F', + raw::GIT_DIFF_LINE_HUNK_HDR => 'H', + raw::GIT_DIFF_LINE_BINARY => 'B', + _ => ' ', + } + } +} + +impl<'a> Binding for DiffLine<'a> { + type Raw = *const raw::git_diff_line; + unsafe fn from_raw(raw: *const raw::git_diff_line) -> DiffLine<'a> { + DiffLine { + raw: raw, + _marker: marker::PhantomData, + } + } + fn raw(&self) -> *const raw::git_diff_line { self.raw } +} + +impl<'a> DiffHunk<'a> { + /// Starting line number in old_file + pub fn old_start(&self) -> u32 { + unsafe { (*self.raw).old_start as u32 } + } + + /// Number of lines in old_file + pub fn old_lines(&self) -> u32 { + unsafe { (*self.raw).old_lines as u32 } + } + + /// Starting line 
number in new_file + pub fn new_start(&self) -> u32 { + unsafe { (*self.raw).new_start as u32 } + } + + /// Number of lines in new_file + pub fn new_lines(&self) -> u32 { + unsafe { (*self.raw).new_lines as u32 } + } + + /// Header text + pub fn header(&self) -> &[u8] { + unsafe { + slice::from_raw_parts((*self.raw).header.as_ptr() as *const u8, + (*self.raw).header_len as usize) + } + } +} + +impl<'a> Binding for DiffHunk<'a> { + type Raw = *const raw::git_diff_hunk; + unsafe fn from_raw(raw: *const raw::git_diff_hunk) -> DiffHunk<'a> { + DiffHunk { + raw: raw, + _marker: marker::PhantomData, + } + } + fn raw(&self) -> *const raw::git_diff_hunk { self.raw } +} + +impl DiffStats { + /// Get the total number of files chaned in a diff. + pub fn files_changed(&self) -> usize { + unsafe { raw::git_diff_stats_files_changed(&*self.raw) as usize } + } + + /// Get the total number of insertions in a diff + pub fn insertions(&self) -> usize { + unsafe { raw::git_diff_stats_insertions(&*self.raw) as usize } + } + + /// Get the total number of deletions in a diff + pub fn deletions(&self) -> usize { + unsafe { raw::git_diff_stats_deletions(&*self.raw) as usize } + } + + /// Print diff statistics to a Buf + pub fn to_buf(&self, format: DiffStatsFormat, width: usize) + -> Result { + let buf = Buf::new(); + unsafe { + try_call!(raw::git_diff_stats_to_buf(buf.raw(), self.raw, + format.bits(), + width as size_t)); + } + Ok(buf) + } +} + +impl Binding for DiffStats { + type Raw = *mut raw::git_diff_stats; + + unsafe fn from_raw(raw: *mut raw::git_diff_stats) -> DiffStats { + DiffStats { raw: raw } + } + fn raw(&self) -> *mut raw::git_diff_stats { self.raw } +} + +impl Drop for DiffStats { + fn drop(&mut self) { + unsafe { raw::git_diff_stats_free(self.raw) } + } +} + +impl<'a> DiffBinary<'a> { + /// Returns whether there is data in this binary structure or not. + /// + /// If this is `true`, then this was produced and included binary content. 
+ /// If this is `false` then this was generated knowing only that a binary + /// file changed but without providing the data, probably from a patch that + /// said `Binary files a/file.txt and b/file.txt differ`. + pub fn contains_data(&self) -> bool { + unsafe { (*self.raw).contains_data == 1 } + } + + /// The contents of the old file. + pub fn old_file(&self) -> DiffBinaryFile<'a> { + unsafe { Binding::from_raw(&(*self.raw).old_file as *const _) } + } + + /// The contents of the new file. + pub fn new_file(&self) -> DiffBinaryFile<'a> { + unsafe { Binding::from_raw(&(*self.raw).new_file as *const _) } + } +} + +impl<'a> Binding for DiffBinary<'a> { + type Raw = *const raw::git_diff_binary; + unsafe fn from_raw(raw: *const raw::git_diff_binary) -> DiffBinary<'a> { + DiffBinary { + raw: raw, + _marker: marker::PhantomData, + } + } + fn raw(&self) -> *const raw::git_diff_binary { self.raw } +} + +impl<'a> DiffBinaryFile<'a> { + /// The type of binary data for this file + pub fn kind(&self) -> DiffBinaryKind { + unsafe { Binding::from_raw((*self.raw).kind) } + } + + /// The binary data, deflated + pub fn data(&self) -> &[u8] { + unsafe { + slice::from_raw_parts((*self.raw).data as *const u8, + (*self.raw).datalen as usize) + } + } + + /// The length of the binary data after inflation + pub fn inflated_len(&self) -> usize { + unsafe { (*self.raw).inflatedlen as usize } + } + +} + +impl<'a> Binding for DiffBinaryFile<'a> { + type Raw = *const raw::git_diff_binary_file; + unsafe fn from_raw(raw: *const raw::git_diff_binary_file) -> DiffBinaryFile<'a> { + DiffBinaryFile { + raw: raw, + _marker: marker::PhantomData, + } + } + fn raw(&self) -> *const raw::git_diff_binary_file { self.raw } +} + +impl Binding for DiffBinaryKind { + type Raw = raw::git_diff_binary_t; + unsafe fn from_raw(raw: raw::git_diff_binary_t) -> DiffBinaryKind { + match raw { + raw::GIT_DIFF_BINARY_NONE => DiffBinaryKind::None, + raw::GIT_DIFF_BINARY_LITERAL => DiffBinaryKind::Literal, + 
raw::GIT_DIFF_BINARY_DELTA => DiffBinaryKind::Delta, + _ => panic!("Unknown git diff binary kind"), + } + } + fn raw(&self) -> raw::git_diff_binary_t { + match *self { + DiffBinaryKind::None => raw::GIT_DIFF_BINARY_NONE, + DiffBinaryKind::Literal => raw::GIT_DIFF_BINARY_LITERAL, + DiffBinaryKind::Delta => raw::GIT_DIFF_BINARY_DELTA, + } + } +} + +impl DiffFindOptions { + /// Creates a new set of empty diff find options. + /// + /// All flags and other options are defaulted to false or their otherwise + /// zero equivalents. + pub fn new() -> DiffFindOptions { + let mut opts = DiffFindOptions { + raw: unsafe { mem::zeroed() }, + }; + assert_eq!(unsafe { + raw::git_diff_find_init_options(&mut opts.raw, 1) + }, 0); + opts + } + + fn flag(&mut self, opt: u32, val: bool) -> &mut DiffFindOptions { + if val { + self.raw.flags |= opt; + } else { + self.raw.flags &= !opt; + } + self + } + + /// Reset all flags back to their unset state, indicating that + /// `diff.renames` should be used instead. This is overridden once any flag + /// is set. + pub fn by_config(&mut self) -> &mut DiffFindOptions { + self.flag(0xffffffff, false) + } + + /// Look for renames? + pub fn renames(&mut self, find: bool) -> &mut DiffFindOptions { + self.flag(raw::GIT_DIFF_FIND_RENAMES, find) + } + + /// Consider old side of modified for renames? + pub fn renames_from_rewrites(&mut self, find: bool) -> &mut DiffFindOptions { + self.flag(raw::GIT_DIFF_FIND_RENAMES_FROM_REWRITES, find) + } + + /// Look for copies? + pub fn copies(&mut self, find: bool) -> &mut DiffFindOptions { + self.flag(raw::GIT_DIFF_FIND_COPIES, find) + } + + /// Consider unmodified as copy sources? + /// + /// For this to work correctly, use `include_unmodified` when the initial + /// diff is being generated. + pub fn copies_from_unmodified(&mut self, find: bool) + -> &mut DiffFindOptions { + self.flag(raw::GIT_DIFF_FIND_COPIES_FROM_UNMODIFIED, find) + } + + /// Mark significant rewrites for split. 
+ pub fn rewrites(&mut self, find: bool) -> &mut DiffFindOptions { + self.flag(raw::GIT_DIFF_FIND_REWRITES, find) + } + + /// Actually split large rewrites into delete/add pairs + pub fn break_rewrites(&mut self, find: bool) -> &mut DiffFindOptions { + self.flag(raw::GIT_DIFF_BREAK_REWRITES, find) + } + + #[doc(hidden)] + pub fn break_rewries(&mut self, find: bool) -> &mut DiffFindOptions { + self.break_rewrites(find) + } + + /// Find renames/copies for untracked items in working directory. + /// + /// For this to work correctly use the `include_untracked` option when the + /// initial diff is being generated. + pub fn for_untracked(&mut self, find: bool) -> &mut DiffFindOptions { + self.flag(raw::GIT_DIFF_FIND_FOR_UNTRACKED, find) + } + + /// Turn on all finding features. + pub fn all(&mut self, find: bool) -> &mut DiffFindOptions { + self.flag(raw::GIT_DIFF_FIND_ALL, find) + } + + /// Measure similarity ignoring leading whitespace (default) + pub fn ignore_leading_whitespace(&mut self, ignore: bool) + -> &mut DiffFindOptions { + self.flag(raw::GIT_DIFF_FIND_IGNORE_LEADING_WHITESPACE, ignore) + } + + /// Measure similarity ignoring all whitespace + pub fn ignore_whitespace(&mut self, ignore: bool) -> &mut DiffFindOptions { + self.flag(raw::GIT_DIFF_FIND_IGNORE_WHITESPACE, ignore) + } + + /// Measure similarity including all data + pub fn dont_ignore_whitespace(&mut self, dont: bool) -> &mut DiffFindOptions { + self.flag(raw::GIT_DIFF_FIND_DONT_IGNORE_WHITESPACE, dont) + } + + /// Measure similarity only by comparing SHAs (fast and cheap) + pub fn exact_match_only(&mut self, exact: bool) -> &mut DiffFindOptions { + self.flag(raw::GIT_DIFF_FIND_EXACT_MATCH_ONLY, exact) + } + + /// Do not break rewrites unless they contribute to a rename. + /// + /// Normally, `break_rewrites` and `rewrites` will measure the + /// self-similarity of modified files and split the ones that have changed a + /// lot into a delete/add pair. 
Then the sides of that pair will be + /// considered candidates for rename and copy detection + /// + /// If you add this flag in and the split pair is not used for an actual + /// rename or copy, then the modified record will be restored to a regular + /// modified record instead of being split. + pub fn break_rewrites_for_renames_only(&mut self, b: bool) + -> &mut DiffFindOptions { + self.flag(raw::GIT_DIFF_BREAK_REWRITES_FOR_RENAMES_ONLY, b) + } + + /// Remove any unmodified deltas after find_similar is done. + /// + /// Using `copies_from_unmodified` to emulate the `--find-copies-harder` + /// behavior requires building a diff with the `include_unmodified` flag. If + /// you do not want unmodified records in the final result, pas this flag to + /// have them removed. + pub fn remove_unmodified(&mut self, remove: bool) -> &mut DiffFindOptions { + self.flag(raw::GIT_DIFF_FIND_REMOVE_UNMODIFIED, remove) + } + + /// Similarity to consider a file renamed (default 50) + pub fn rename_threshold(&mut self, thresh: u16) -> &mut DiffFindOptions { + self.raw.rename_threshold = thresh; + self + } + + /// Similarity of modified to be glegible rename source (default 50) + pub fn rename_from_rewrite_threshold(&mut self, thresh: u16) + -> &mut DiffFindOptions { + self.raw.rename_from_rewrite_threshold = thresh; + self + } + + /// Similarity to consider a file copy (default 50) + pub fn copy_threshold(&mut self, thresh: u16) -> &mut DiffFindOptions { + self.raw.copy_threshold = thresh; + self + } + + /// Similarity to split modify into delete/add pair (default 60) + pub fn break_rewrite_threshold(&mut self, thresh: u16) + -> &mut DiffFindOptions { + self.raw.break_rewrite_threshold = thresh; + self + } + + /// Maximum similarity sources to examine for a file (somewhat like + /// git-diff's `-l` option or `diff.renameLimit` config) + /// + /// Defaults to 200 + pub fn rename_limit(&mut self, limit: usize) -> &mut DiffFindOptions { + self.raw.rename_limit = limit as size_t; + 
self + } + + // TODO: expose git_diff_similarity_metric +} + +#[cfg(test)] +mod tests { + use DiffOptions; + use std::fs::File; + use std::path::Path; + use std::borrow::Borrow; + use std::io::Write; + + #[test] + fn smoke() { + let (_td, repo) = ::test::repo_init(); + let diff = repo.diff_tree_to_workdir(None, None).unwrap(); + assert_eq!(diff.deltas().len(), 0); + let stats = diff.stats().unwrap(); + assert_eq!(stats.insertions(), 0); + assert_eq!(stats.deletions(), 0); + assert_eq!(stats.files_changed(), 0); + } + + #[test] + fn foreach_smoke() { + let (_td, repo) = ::test::repo_init(); + let diff = t!(repo.diff_tree_to_workdir(None, None)); + let mut count = 0; + t!(diff.foreach(&mut |_file, _progress| { count = count + 1; true }, + None, None, None)); + assert_eq!(count, 0); + } + + #[test] + fn foreach_file_only() { + let path = Path::new("foo"); + let (td, repo) = ::test::repo_init(); + t!(t!(File::create(&td.path().join(path))).write_all(b"bar")); + let mut opts = DiffOptions::new(); + opts.include_untracked(true); + let diff = t!(repo.diff_tree_to_workdir(None, Some(&mut opts))); + let mut count = 0; + let mut result = None; + t!(diff.foreach(&mut |file, _progress| { + count = count + 1; + result = file.new_file().path().map(ToOwned::to_owned); + true + }, None, None, None)); + assert_eq!(result.as_ref().map(Borrow::borrow), Some(path)); + assert_eq!(count, 1); + } + + #[test] + fn foreach_file_and_hunk() { + let path = Path::new("foo"); + let (td, repo) = ::test::repo_init(); + t!(t!(File::create(&td.path().join(path))).write_all(b"bar")); + let mut index = t!(repo.index()); + t!(index.add_path(path)); + let mut opts = DiffOptions::new(); + opts.include_untracked(true); + let diff = t!(repo.diff_tree_to_index(None, Some(&index), + Some(&mut opts))); + let mut new_lines = 0; + t!(diff.foreach( + &mut |_file, _progress| { true }, + None, + Some(&mut |_file, hunk| { + new_lines = hunk.new_lines(); + true + }), + None)); + assert_eq!(new_lines, 1); + } + + 
#[test] + fn foreach_all_callbacks() { + let fib = vec![0, 1, 1, 2, 3, 5, 8]; + // Verified with a node implementation of deflate, might be worth + // adding a deflate lib to do this inline here. + let deflated_fib = vec![120, 156, 99, 96, 100, 100, 98, 102, 229, 0, 0, + 0, 53, 0, 21]; + let foo_path = Path::new("foo"); + let bin_path = Path::new("bin"); + let (td, repo) = ::test::repo_init(); + t!(t!(File::create(&td.path().join(foo_path))).write_all(b"bar\n")); + t!(t!(File::create(&td.path().join(bin_path))).write_all(&fib)); + let mut index = t!(repo.index()); + t!(index.add_path(foo_path)); + t!(index.add_path(bin_path)); + let mut opts = DiffOptions::new(); + opts.include_untracked(true).show_binary(true); + let diff = t!(repo.diff_tree_to_index(None, Some(&index), + Some(&mut opts))); + let mut bin_content = None; + let mut new_lines = 0; + let mut line_content = None; + t!(diff.foreach( + &mut |_file, _progress| { true }, + Some(&mut |_file, binary| { + bin_content = Some(binary.new_file().data().to_owned()); + true + }), + Some(&mut |_file, hunk| { + new_lines = hunk.new_lines(); + true + }), + Some(&mut |_file, _hunk, line| { + line_content = String::from_utf8(line.content().into()).ok(); + true + }))); + assert_eq!(bin_content, Some(deflated_fib)); + assert_eq!(new_lines, 1); + assert_eq!(line_content, Some("bar\n".to_string())); + } +} diff -Nru cargo-0.17.0/vendor/git2-0.6.4/src/error.rs cargo-0.19.0/vendor/git2-0.6.4/src/error.rs --- cargo-0.17.0/vendor/git2-0.6.4/src/error.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/git2-0.6.4/src/error.rs 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,237 @@ +use std::env::JoinPathsError; +use std::ffi::{CStr, NulError}; +use std::error; +use std::fmt; +use std::str; +use libc::c_int; + +use {raw, ErrorClass, ErrorCode}; + +/// A structure to represent errors coming out of libgit2. 
+#[derive(Debug,PartialEq)] +pub struct Error { + code: c_int, + klass: c_int, + message: String, +} + +impl Error { + /// Returns the last error, or `None` if one is not available. + pub fn last_error(code: c_int) -> Option { + ::init(); + unsafe { + let ptr = raw::giterr_last(); + if ptr.is_null() { + None + } else { + Some(Error::from_raw(code, ptr)) + } + } + } + + unsafe fn from_raw(code: c_int, ptr: *const raw::git_error) -> Error { + let msg = CStr::from_ptr((*ptr).message as *const _).to_bytes(); + let msg = str::from_utf8(msg).unwrap(); + Error { code: code, klass: (*ptr).klass, message: msg.to_string() } + } + + /// Creates a new error from the given string as the error. + pub fn from_str(s: &str) -> Error { + Error { + code: raw::GIT_ERROR as c_int, + klass: raw::GITERR_NONE as c_int, + message: s.to_string(), + } + } + + /// Return the error code associated with this error. + pub fn code(&self) -> ErrorCode { + match self.raw_code() { + raw::GIT_OK => super::ErrorCode::GenericError, + raw::GIT_ERROR => super::ErrorCode::GenericError, + raw::GIT_ENOTFOUND => super::ErrorCode::NotFound, + raw::GIT_EEXISTS => super::ErrorCode::Exists, + raw::GIT_EAMBIGUOUS => super::ErrorCode::Ambiguous, + raw::GIT_EBUFS => super::ErrorCode::BufSize, + raw::GIT_EUSER => super::ErrorCode::User, + raw::GIT_EBAREREPO => super::ErrorCode::BareRepo, + raw::GIT_EUNBORNBRANCH => super::ErrorCode::UnbornBranch, + raw::GIT_EUNMERGED => super::ErrorCode::Unmerged, + raw::GIT_ENONFASTFORWARD => super::ErrorCode::NotFastForward, + raw::GIT_EINVALIDSPEC => super::ErrorCode::InvalidSpec, + raw::GIT_ECONFLICT => super::ErrorCode::Conflict, + raw::GIT_ELOCKED => super::ErrorCode::Locked, + raw::GIT_EMODIFIED => super::ErrorCode::Modified, + raw::GIT_PASSTHROUGH => super::ErrorCode::GenericError, + raw::GIT_ITEROVER => super::ErrorCode::GenericError, + raw::GIT_EAUTH => super::ErrorCode::Auth, + raw::GIT_ECERTIFICATE => super::ErrorCode::Certificate, + raw::GIT_EAPPLIED => 
super::ErrorCode::Applied, + raw::GIT_EPEEL => super::ErrorCode::Peel, + raw::GIT_EEOF => super::ErrorCode::Eof, + raw::GIT_EINVALID => super::ErrorCode::Invalid, + raw::GIT_EUNCOMMITTED => super::ErrorCode::Uncommitted, + raw::GIT_EDIRECTORY => super::ErrorCode::Directory, + _ => super::ErrorCode::GenericError, + } + } + + /// Return the error class associated with this error. + pub fn class(&self) -> ErrorClass { + match self.raw_class() { + raw::GITERR_NONE => super::ErrorClass::None, + raw::GITERR_NOMEMORY => super::ErrorClass::NoMemory, + raw::GITERR_OS => super::ErrorClass::Os, + raw::GITERR_INVALID => super::ErrorClass::Invalid, + raw::GITERR_REFERENCE => super::ErrorClass::Reference, + raw::GITERR_ZLIB => super::ErrorClass::Zlib, + raw::GITERR_REPOSITORY => super::ErrorClass::Repository, + raw::GITERR_CONFIG => super::ErrorClass::Config, + raw::GITERR_REGEX => super::ErrorClass::Regex, + raw::GITERR_ODB => super::ErrorClass::Odb, + raw::GITERR_INDEX => super::ErrorClass::Index, + raw::GITERR_OBJECT => super::ErrorClass::Object, + raw::GITERR_NET => super::ErrorClass::Net, + raw::GITERR_TAG => super::ErrorClass::Tag, + raw::GITERR_TREE => super::ErrorClass::Tree, + raw::GITERR_INDEXER => super::ErrorClass::Indexer, + raw::GITERR_SSL => super::ErrorClass::Ssl, + raw::GITERR_SUBMODULE => super::ErrorClass::Submodule, + raw::GITERR_THREAD => super::ErrorClass::Thread, + raw::GITERR_STASH => super::ErrorClass::Stash, + raw::GITERR_CHECKOUT => super::ErrorClass::Checkout, + raw::GITERR_FETCHHEAD => super::ErrorClass::FetchHead, + raw::GITERR_MERGE => super::ErrorClass::Merge, + raw::GITERR_SSH => super::ErrorClass::Ssh, + raw::GITERR_FILTER => super::ErrorClass::Filter, + raw::GITERR_REVERT => super::ErrorClass::Revert, + raw::GITERR_CALLBACK => super::ErrorClass::Callback, + raw::GITERR_CHERRYPICK => super::ErrorClass::CherryPick, + raw::GITERR_DESCRIBE => super::ErrorClass::Describe, + raw::GITERR_REBASE => super::ErrorClass::Rebase, + raw::GITERR_FILESYSTEM => 
super::ErrorClass::Filesystem, + _ => super::ErrorClass::None, + } + } + + /// Return the raw error code associated with this error. + pub fn raw_code(&self) -> raw::git_error_code { + macro_rules! check( ($($e:ident,)*) => ( + $(if self.code == raw::$e as c_int { raw::$e }) else * + else { + raw::GIT_ERROR + } + ) ); + check!( + GIT_OK, + GIT_ERROR, + GIT_ENOTFOUND, + GIT_EEXISTS, + GIT_EAMBIGUOUS, + GIT_EBUFS, + GIT_EUSER, + GIT_EBAREREPO, + GIT_EUNBORNBRANCH, + GIT_EUNMERGED, + GIT_ENONFASTFORWARD, + GIT_EINVALIDSPEC, + GIT_ECONFLICT, + GIT_ELOCKED, + GIT_EMODIFIED, + GIT_EAUTH, + GIT_ECERTIFICATE, + GIT_EAPPLIED, + GIT_EPEEL, + GIT_EEOF, + GIT_EINVALID, + GIT_EUNCOMMITTED, + GIT_PASSTHROUGH, + GIT_ITEROVER, + ) + } + + /// Return the raw error class associated with this error. + pub fn raw_class(&self) -> raw::git_error_t { + macro_rules! check( ($($e:ident,)*) => ( + $(if self.klass == raw::$e as c_int { raw::$e }) else * + else { + raw::GITERR_NONE + } + ) ); + check!( + GITERR_NONE, + GITERR_NOMEMORY, + GITERR_OS, + GITERR_INVALID, + GITERR_REFERENCE, + GITERR_ZLIB, + GITERR_REPOSITORY, + GITERR_CONFIG, + GITERR_REGEX, + GITERR_ODB, + GITERR_INDEX, + GITERR_OBJECT, + GITERR_NET, + GITERR_TAG, + GITERR_TREE, + GITERR_INDEXER, + GITERR_SSL, + GITERR_SUBMODULE, + GITERR_THREAD, + GITERR_STASH, + GITERR_CHECKOUT, + GITERR_FETCHHEAD, + GITERR_MERGE, + GITERR_SSH, + GITERR_FILTER, + GITERR_REVERT, + GITERR_CALLBACK, + GITERR_CHERRYPICK, + GITERR_DESCRIBE, + GITERR_REBASE, + GITERR_FILESYSTEM, + ) + } + + /// Return the message associated with this error + pub fn message(&self) -> &str { &self.message } +} + +impl error::Error for Error { + fn description(&self) -> &str { &self.message } +} + +impl fmt::Display for Error { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + try!(write!(f, "[{}/{}] ", self.klass, self.code)); + f.write_str(&self.message) + } +} + +impl From for Error { + fn from(_: NulError) -> Error { + Error::from_str("data contained a nul 
byte that could not be \ + represented as a string") + } +} + +impl From for Error { + fn from(e: JoinPathsError) -> Error { + Error::from_str(error::Error::description(&e)) + } +} + + +#[cfg(test)] +mod tests { + use {ErrorClass, ErrorCode}; + + #[test] + fn smoke() { + let (_td, repo) = ::test::repo_init(); + + let err = repo.find_submodule("does_not_exist").err().unwrap(); + assert_eq!(err.code(), ErrorCode::NotFound); + assert_eq!(err.class(), ErrorClass::Submodule); + } +} diff -Nru cargo-0.17.0/vendor/git2-0.6.4/src/index.rs cargo-0.19.0/vendor/git2-0.6.4/src/index.rs --- cargo-0.17.0/vendor/git2-0.6.4/src/index.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/git2-0.6.4/src/index.rs 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,620 @@ +use std::ffi::{CStr, OsString, CString}; +use std::ops::Range; +use std::path::Path; +use std::slice; + +use libc::{c_int, c_uint, size_t, c_void, c_char}; + +use {raw, panic, Repository, Error, Tree, Oid, IndexAddOption, IndexTime}; +use IntoCString; +use util::{self, Binding}; + +/// A structure to represent a git [index][1] +/// +/// [1]: http://git-scm.com/book/en/Git-Internals-Git-Objects +pub struct Index { + raw: *mut raw::git_index, +} + +/// An iterator over the entries in an index +pub struct IndexEntries<'index> { + range: Range, + index: &'index Index, +} + +/// A callback function to filter index matches. +/// +/// Used by `Index::{add_all,remove_all,update_all}`. The first argument is the +/// path, and the second is the patchspec that matched it. Return 0 to confirm +/// the operation on the item, > 0 to skip the item, and < 0 to abort the scan. +pub type IndexMatchedPath<'a> = FnMut(&Path, &[u8]) -> i32 + 'a; + +/// A structure to represent an entry or a file inside of an index. +/// +/// All fields of an entry are public for modification and inspection. This is +/// also how a new index entry is created. 
+#[allow(missing_docs)] +pub struct IndexEntry { + pub ctime: IndexTime, + pub mtime: IndexTime, + pub dev: u32, + pub ino: u32, + pub mode: u32, + pub uid: u32, + pub gid: u32, + pub file_size: u32, + pub id: Oid, + pub flags: u16, + pub flags_extended: u16, + pub path: Vec, +} + +impl Index { + /// Creates a new in-memory index. + /// + /// This index object cannot be read/written to the filesystem, but may be + /// used to perform in-memory index operations. + pub fn new() -> Result { + ::init(); + let mut raw = 0 as *mut raw::git_index; + unsafe { + try_call!(raw::git_index_new(&mut raw)); + Ok(Binding::from_raw(raw)) + } + } + + /// Create a new bare Git index object as a memory representation of the Git + /// index file in 'index_path', without a repository to back it. + /// + /// Since there is no ODB or working directory behind this index, any Index + /// methods which rely on these (e.g. add_path) will fail. + /// + /// If you need an index attached to a repository, use the `index()` method + /// on `Repository`. + pub fn open(index_path: &Path) -> Result { + ::init(); + let mut raw = 0 as *mut raw::git_index; + let index_path = try!(index_path.into_c_string()); + unsafe { + try_call!(raw::git_index_open(&mut raw, index_path)); + Ok(Binding::from_raw(raw)) + } + } + + /// Add or update an index entry from an in-memory struct + /// + /// If a previous index entry exists that has the same path and stage as the + /// given 'source_entry', it will be replaced. Otherwise, the 'source_entry' + /// will be added. + pub fn add(&mut self, entry: &IndexEntry) -> Result<(), Error> { + let path = try!(CString::new(&entry.path[..])); + + // libgit2 encodes the length of the path in the lower bits of the + // `flags` entry, so mask those out and recalculate here to ensure we + // don't corrupt anything. 
+ let mut flags = entry.flags & !raw::GIT_IDXENTRY_NAMEMASK; + + if entry.path.len() < raw::GIT_IDXENTRY_NAMEMASK as usize { + flags |= entry.path.len() as u16; + } else { + flags |= raw::GIT_IDXENTRY_NAMEMASK; + } + + unsafe { + let raw = raw::git_index_entry { + dev: entry.dev, + ino: entry.ino, + mode: entry.mode, + uid: entry.uid, + gid: entry.gid, + file_size: entry.file_size, + id: *entry.id.raw(), + flags: flags, + flags_extended: entry.flags_extended, + path: path.as_ptr(), + mtime: raw::git_index_time { + seconds: entry.mtime.seconds(), + nanoseconds: entry.mtime.nanoseconds(), + }, + ctime: raw::git_index_time { + seconds: entry.ctime.seconds(), + nanoseconds: entry.ctime.nanoseconds(), + }, + }; + try_call!(raw::git_index_add(self.raw, &raw)); + Ok(()) + } + } + + /// Add or update an index entry from a file on disk + /// + /// The file path must be relative to the repository's working folder and + /// must be readable. + /// + /// This method will fail in bare index instances. + /// + /// This forces the file to be added to the index, not looking at gitignore + /// rules. + /// + /// If this file currently is the result of a merge conflict, this file will + /// no longer be marked as conflicting. The data about the conflict will be + /// moved to the "resolve undo" (REUC) section. + pub fn add_path(&mut self, path: &Path) -> Result<(), Error> { + // Git apparently expects '/' to be separators for paths + let mut posix_path = OsString::new(); + for (i, comp) in path.components().enumerate() { + if i != 0 { posix_path.push("/"); } + posix_path.push(comp.as_os_str()); + } + let posix_path = try!(posix_path.into_c_string()); + unsafe { + try_call!(raw::git_index_add_bypath(self.raw, posix_path)); + Ok(()) + } + } + + /// Add or update index entries matching files in the working directory. + /// + /// This method will fail in bare index instances. 
+ /// + /// The `pathspecs` are a list of file names or shell glob patterns that + /// will matched against files in the repository's working directory. Each + /// file that matches will be added to the index (either updating an + /// existing entry or adding a new entry). You can disable glob expansion + /// and force exact matching with the `AddDisablePathspecMatch` flag. + /// + /// Files that are ignored will be skipped (unlike `add_path`). If a file is + /// already tracked in the index, then it will be updated even if it is + /// ignored. Pass the `AddForce` flag to skip the checking of ignore rules. + /// + /// To emulate `git add -A` and generate an error if the pathspec contains + /// the exact path of an ignored file (when not using `AddForce`), add the + /// `AddCheckPathspec` flag. This checks that each entry in `pathspecs` + /// that is an exact match to a filename on disk is either not ignored or + /// already in the index. If this check fails, the function will return + /// an error. + /// + /// To emulate `git add -A` with the "dry-run" option, just use a callback + /// function that always returns a positive value. See below for details. + /// + /// If any files are currently the result of a merge conflict, those files + /// will no longer be marked as conflicting. The data about the conflicts + /// will be moved to the "resolve undo" (REUC) section. + /// + /// If you provide a callback function, it will be invoked on each matching + /// item in the working directory immediately before it is added to / + /// updated in the index. Returning zero will add the item to the index, + /// greater than zero will skip the item, and less than zero will abort the + /// scan an return an error to the caller. 
+ pub fn add_all(&mut self, + pathspecs: I, + flag: IndexAddOption, + mut cb: Option<&mut IndexMatchedPath>) + -> Result<(), Error> + where T: IntoCString, I: IntoIterator, + { + let (_a, _b, raw_strarray) = try!(::util::iter2cstrs(pathspecs)); + let ptr = cb.as_mut(); + let callback = ptr.as_ref().map(|_| { + index_matched_path_cb as raw::git_index_matched_path_cb + }); + unsafe { + try_call!(raw::git_index_add_all(self.raw, + &raw_strarray, + flag.bits() as c_uint, + callback, + ptr.map(|p| p as *mut _) + .unwrap_or(0 as *mut _) + as *mut c_void)); + } + return Ok(()); + } + + /// Clear the contents (all the entries) of an index object. + /// + /// This clears the index object in memory; changes must be explicitly + /// written to disk for them to take effect persistently via `write_*`. + pub fn clear(&mut self) -> Result<(), Error> { + unsafe { try_call!(raw::git_index_clear(self.raw)); } + Ok(()) + } + + /// Get the count of entries currently in the index + pub fn len(&self) -> usize { + unsafe { raw::git_index_entrycount(&*self.raw) as usize } + } + + /// Get one of the entries in the index by its position. + pub fn get(&self, n: usize) -> Option { + unsafe { + let ptr = raw::git_index_get_byindex(self.raw, n as size_t); + if ptr.is_null() {None} else {Some(Binding::from_raw(*ptr))} + } + } + + /// Get an iterator over the entries in this index. + pub fn iter(&self) -> IndexEntries { + IndexEntries { range: 0..self.len(), index: self } + } + + /// Get one of the entries in the index by its path. + pub fn get_path(&self, path: &Path, stage: i32) -> Option { + let path = path.into_c_string().unwrap(); + unsafe { + let ptr = call!(raw::git_index_get_bypath(self.raw, path, + stage as c_int)); + if ptr.is_null() {None} else {Some(Binding::from_raw(*ptr))} + } + } + + /// Get the full path to the index file on disk. + /// + /// Returns `None` if this is an in-memory index. 
+ pub fn path(&self) -> Option<&Path> { + unsafe { + ::opt_bytes(self, raw::git_index_path(&*self.raw)).map(util::bytes2path) + } + } + + /// Update the contents of an existing index object in memory by reading + /// from the hard disk. + /// + /// If force is true, this performs a "hard" read that discards in-memory + /// changes and always reloads the on-disk index data. If there is no + /// on-disk version, the index will be cleared. + /// + /// If force is false, this does a "soft" read that reloads the index data + /// from disk only if it has changed since the last time it was loaded. + /// Purely in-memory index data will be untouched. Be aware: if there are + /// changes on disk, unwritten in-memory changes are discarded. + pub fn read(&mut self, force: bool) -> Result<(), Error> { + unsafe { try_call!(raw::git_index_read(self.raw, force)); } + Ok(()) + } + + /// Read a tree into the index file with stats + /// + /// The current index contents will be replaced by the specified tree. + pub fn read_tree(&mut self, tree: &Tree) -> Result<(), Error> { + unsafe { try_call!(raw::git_index_read_tree(self.raw, &*tree.raw())); } + Ok(()) + } + + /// Remove an entry from the index + pub fn remove(&mut self, path: &Path, stage: i32) -> Result<(), Error> { + let path = try!(path.into_c_string()); + unsafe { + try_call!(raw::git_index_remove(self.raw, path, stage as c_int)); + } + Ok(()) + } + + /// Remove an index entry corresponding to a file on disk. + /// + /// The file path must be relative to the repository's working folder. It + /// may exist. + /// + /// If this file currently is the result of a merge conflict, this file will + /// no longer be marked as conflicting. The data about the conflict will be + /// moved to the "resolve undo" (REUC) section. 
+ pub fn remove_path(&mut self, path: &Path) -> Result<(), Error> { + let path = try!(path.into_c_string()); + unsafe { + try_call!(raw::git_index_remove_bypath(self.raw, path)); + } + Ok(()) + } + + /// Remove all entries from the index under a given directory. + pub fn remove_dir(&mut self, path: &Path, stage: i32) -> Result<(), Error> { + let path = try!(path.into_c_string()); + unsafe { + try_call!(raw::git_index_remove_directory(self.raw, path, + stage as c_int)); + } + Ok(()) + } + + /// Remove all matching index entries. + /// + /// If you provide a callback function, it will be invoked on each matching + /// item in the index immediately before it is removed. Return 0 to remove + /// the item, > 0 to skip the item, and < 0 to abort the scan. + pub fn remove_all(&mut self, + pathspecs: I, + mut cb: Option<&mut IndexMatchedPath>) + -> Result<(), Error> + where T: IntoCString, I: IntoIterator, + { + let (_a, _b, raw_strarray) = try!(::util::iter2cstrs(pathspecs)); + let ptr = cb.as_mut(); + let callback = ptr.as_ref().map(|_| { + index_matched_path_cb as raw::git_index_matched_path_cb + }); + unsafe { + try_call!(raw::git_index_remove_all(self.raw, + &raw_strarray, + callback, + ptr.map(|p| p as *mut _) + .unwrap_or(0 as *mut _) + as *mut c_void)); + } + return Ok(()); + } + + /// Update all index entries to match the working directory + /// + /// This method will fail in bare index instances. + /// + /// This scans the existing index entries and synchronizes them with the + /// working directory, deleting them if the corresponding working directory + /// file no longer exists otherwise updating the information (including + /// adding the latest version of file to the ODB if needed). + /// + /// If you provide a callback function, it will be invoked on each matching + /// item in the index immediately before it is updated (either refreshed or + /// removed depending on working directory state). 
Return 0 to proceed with + /// updating the item, > 0 to skip the item, and < 0 to abort the scan. + pub fn update_all(&mut self, + pathspecs: I, + mut cb: Option<&mut IndexMatchedPath>) + -> Result<(), Error> + where T: IntoCString, I: IntoIterator, + { + let (_a, _b, raw_strarray) = try!(::util::iter2cstrs(pathspecs)); + let ptr = cb.as_mut(); + let callback = ptr.as_ref().map(|_| { + index_matched_path_cb as raw::git_index_matched_path_cb + }); + unsafe { + try_call!(raw::git_index_update_all(self.raw, + &raw_strarray, + callback, + ptr.map(|p| p as *mut _) + .unwrap_or(0 as *mut _) + as *mut c_void)); + } + return Ok(()); + } + + /// Write an existing index object from memory back to disk using an atomic + /// file lock. + pub fn write(&mut self) -> Result<(), Error> { + unsafe { try_call!(raw::git_index_write(self.raw)); } + Ok(()) + } + + /// Write the index as a tree. + /// + /// This method will scan the index and write a representation of its + /// current state back to disk; it recursively creates tree objects for each + /// of the subtrees stored in the index, but only returns the OID of the + /// root tree. This is the OID that can be used e.g. to create a commit. + /// + /// The index instance cannot be bare, and needs to be associated to an + /// existing repository. + /// + /// The index must not contain any file in conflict. + pub fn write_tree(&mut self) -> Result { + let mut raw = raw::git_oid { id: [0; raw::GIT_OID_RAWSZ] }; + unsafe { + try_call!(raw::git_index_write_tree(&mut raw, self.raw)); + Ok(Binding::from_raw(&raw as *const _)) + } + } + + /// Write the index as a tree to the given repository + /// + /// This is the same as `write_tree` except that the destination repository + /// can be chosen. 
+ pub fn write_tree_to(&mut self, repo: &Repository) -> Result { + let mut raw = raw::git_oid { id: [0; raw::GIT_OID_RAWSZ] }; + unsafe { + try_call!(raw::git_index_write_tree_to(&mut raw, self.raw, + repo.raw())); + Ok(Binding::from_raw(&raw as *const _)) + } + } +} + +impl Binding for Index { + type Raw = *mut raw::git_index; + unsafe fn from_raw(raw: *mut raw::git_index) -> Index { + Index { raw: raw } + } + fn raw(&self) -> *mut raw::git_index { self.raw } +} + +extern fn index_matched_path_cb(path: *const c_char, + matched_pathspec: *const c_char, + payload: *mut c_void) -> c_int { + unsafe { + let path = CStr::from_ptr(path).to_bytes(); + let matched_pathspec = CStr::from_ptr(matched_pathspec).to_bytes(); + + panic::wrap(|| { + let payload = payload as *mut &mut IndexMatchedPath; + (*payload)(util::bytes2path(path), matched_pathspec) as c_int + }).unwrap_or(-1) + } +} + +impl Drop for Index { + fn drop(&mut self) { + unsafe { raw::git_index_free(self.raw) } + } +} + +impl<'index> Iterator for IndexEntries<'index> { + type Item = IndexEntry; + fn next(&mut self) -> Option { + self.range.next().map(|i| self.index.get(i).unwrap()) + } +} + +impl Binding for IndexEntry { + type Raw = raw::git_index_entry; + + unsafe fn from_raw(raw: raw::git_index_entry) -> IndexEntry { + let raw::git_index_entry { + ctime, mtime, dev, ino, mode, uid, gid, file_size, id, flags, + flags_extended, path + } = raw; + + // libgit2 encodes the length of the path in the lower bits of `flags`, + // but if the length exceeds the number of bits then the path is + // nul-terminated. 
+ let mut pathlen = (flags & raw::GIT_IDXENTRY_NAMEMASK) as usize; + if pathlen == raw::GIT_IDXENTRY_NAMEMASK as usize { + pathlen = CStr::from_ptr(path).to_bytes().len(); + } + + let path = slice::from_raw_parts(path as *const u8, pathlen); + + IndexEntry { + dev: dev, + ino: ino, + mode: mode, + uid: uid, + gid: gid, + file_size: file_size, + id: Binding::from_raw(&id as *const _), + flags: flags, + flags_extended: flags_extended, + path: path.to_vec(), + mtime: Binding::from_raw(mtime), + ctime: Binding::from_raw(ctime), + } + } + + fn raw(&self) -> raw::git_index_entry { + // not implemented, may require a CString in storage + panic!() + } +} + +#[cfg(test)] +mod tests { + use std::fs::{self, File}; + use std::path::Path; + use tempdir::TempDir; + + use {Index, IndexEntry, Repository, ResetType, Oid, IndexTime}; + + #[test] + fn smoke() { + let mut index = Index::new().unwrap(); + assert!(index.add_path(&Path::new(".")).is_err()); + index.clear().unwrap(); + assert_eq!(index.len(), 0); + assert!(index.get(0).is_none()); + assert!(index.path().is_none()); + assert!(index.read(true).is_err()); + } + + #[test] + fn smoke_from_repo() { + let (_td, repo) = ::test::repo_init(); + let mut index = repo.index().unwrap(); + assert_eq!(index.path().map(|s| s.to_path_buf()), + Some(repo.path().join("index"))); + Index::open(&repo.path().join("index")).unwrap(); + + index.clear().unwrap(); + index.read(true).unwrap(); + index.write().unwrap(); + index.write_tree().unwrap(); + index.write_tree_to(&repo).unwrap(); + } + + #[test] + fn add_all() { + let (_td, repo) = ::test::repo_init(); + let mut index = repo.index().unwrap(); + + let root = repo.path().parent().unwrap(); + fs::create_dir(&root.join("foo")).unwrap(); + File::create(&root.join("foo/bar")).unwrap(); + let mut called = false; + index.add_all(["foo"].iter(), ::ADD_DEFAULT, + Some(&mut |a: &Path, b: &[u8]| { + assert!(!called); + called = true; + assert_eq!(b, b"foo"); + assert_eq!(a, Path::new("foo/bar")); + 0 + 
})).unwrap(); + assert!(called); + + called = false; + index.remove_all(["."].iter(), Some(&mut |a: &Path, b: &[u8]| { + assert!(!called); + called = true; + assert_eq!(b, b"."); + assert_eq!(a, Path::new("foo/bar")); + 0 + })).unwrap(); + assert!(called); + } + + #[test] + fn smoke_add() { + let (_td, repo) = ::test::repo_init(); + let mut index = repo.index().unwrap(); + + let root = repo.path().parent().unwrap(); + fs::create_dir(&root.join("foo")).unwrap(); + File::create(&root.join("foo/bar")).unwrap(); + index.add_path(Path::new("foo/bar")).unwrap(); + index.write().unwrap(); + assert_eq!(index.iter().count(), 1); + + // Make sure we can use this repo somewhere else now. + let id = index.write_tree().unwrap(); + let tree = repo.find_tree(id).unwrap(); + let sig = repo.signature().unwrap(); + let id = repo.refname_to_id("HEAD").unwrap(); + let parent = repo.find_commit(id).unwrap(); + let commit = repo.commit(Some("HEAD"), &sig, &sig, "commit", + &tree, &[&parent]).unwrap(); + let obj = repo.find_object(commit, None).unwrap(); + repo.reset(&obj, ResetType::Hard, None).unwrap(); + + let td2 = TempDir::new("git").unwrap(); + let url = ::test::path2url(&root); + let repo = Repository::clone(&url, td2.path()).unwrap(); + let obj = repo.find_object(commit, None).unwrap(); + repo.reset(&obj, ResetType::Hard, None).unwrap(); + } + + #[test] + fn add_then_read() { + let mut index = Index::new().unwrap(); + assert!(index.add(&entry()).is_err()); + + let mut index = Index::new().unwrap(); + let mut e = entry(); + e.path = b"foobar".to_vec(); + index.add(&e).unwrap(); + let e = index.get(0).unwrap(); + assert_eq!(e.path.len(), 6); + } + + fn entry() -> IndexEntry { + IndexEntry { + ctime: IndexTime::new(0, 0), + mtime: IndexTime::new(0, 0), + dev: 0, + ino: 0, + mode: 0o100644, + uid: 0, + gid: 0, + file_size: 0, + id: Oid::from_bytes(&[0; 20]).unwrap(), + flags: 0, + flags_extended: 0, + path: Vec::new(), + } + } +} + diff -Nru cargo-0.17.0/vendor/git2-0.6.4/src/lib.rs 
cargo-0.19.0/vendor/git2-0.6.4/src/lib.rs --- cargo-0.17.0/vendor/git2-0.6.4/src/lib.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/git2-0.6.4/src/lib.rs 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,1091 @@ +//! # libgit2 bindings for Rust +//! +//! This library contains bindings to the [libgit2][1] C library which is used +//! to manage git repositories. The library itself is a work in progress and is +//! likely lacking some bindings here and there, so be warned. +//! +//! [1]: https://libgit2.github.com/ +//! +//! The git2-rs library strives to be as close to libgit2 as possible, but also +//! strives to make using libgit2 as safe as possible. All resource management +//! is automatic as well as adding strong types to all interfaces (including +//! `Result`) +//! +//! ## Creating a `Repository` +//! +//! The `Repository` is the source from which almost all other objects in git-rs +//! are spawned. A repository can be created through opening, initializing, or +//! cloning. +//! +//! ### Initializing a new repository +//! +//! The `init` method will create a new repository, assuming one does not +//! already exist. +//! +//! ```no_run +//! # #![allow(unstable)] +//! use git2::Repository; +//! +//! let repo = match Repository::init("/path/to/a/repo") { +//! Ok(repo) => repo, +//! Err(e) => panic!("failed to init: {}", e), +//! }; +//! ``` +//! +//! ### Opening an existing repository +//! +//! ```no_run +//! # #![allow(unstable)] +//! use git2::Repository; +//! +//! let repo = match Repository::open("/path/to/a/repo") { +//! Ok(repo) => repo, +//! Err(e) => panic!("failed to open: {}", e), +//! }; +//! ``` +//! +//! ### Cloning an existing repository +//! +//! ```no_run +//! # #![allow(unstable)] +//! use git2::Repository; +//! +//! let url = "https://github.com/alexcrichton/git2-rs"; +//! let repo = match Repository::clone(url, "/path/to/a/repo") { +//! Ok(repo) => repo, +//! Err(e) => panic!("failed to clone: {}", e), +//! }; +//! ``` +//! +//! 
## Working with a `Repository` +//! +//! All deriviative objects, references, etc are attached to the lifetime of the +//! source `Repository`, to ensure that they do not outlive the repository +//! itself. + +#![doc(html_root_url = "https://docs.rs/git2/0.6")] +#![allow(trivial_numeric_casts, trivial_casts)] +#![deny(missing_docs)] +#![cfg_attr(test, deny(warnings))] + +extern crate libc; +extern crate url; +extern crate libgit2_sys as raw; +#[macro_use] extern crate bitflags; +#[cfg(test)] extern crate tempdir; + +use std::ffi::{CStr, CString}; +use std::fmt; +use std::str; +use std::sync::{Once, ONCE_INIT}; + +pub use blame::{Blame, BlameHunk, BlameIter, BlameOptions}; +pub use blob::Blob; +pub use branch::{Branch, Branches}; +pub use buf::Buf; +pub use commit::{Commit, Parents}; +pub use config::{Config, ConfigEntry, ConfigEntries}; +pub use cred::{Cred, CredentialHelper}; +pub use describe::{Describe, DescribeFormatOptions, DescribeOptions}; +pub use diff::{Diff, DiffDelta, DiffFile, DiffOptions, Deltas}; +pub use diff::{DiffBinary, DiffBinaryFile, DiffBinaryKind}; +pub use diff::{DiffLine, DiffHunk, DiffStats, DiffFindOptions}; +pub use error::Error; +pub use index::{Index, IndexEntry, IndexEntries, IndexMatchedPath}; +pub use merge::{AnnotatedCommit, MergeOptions}; +pub use message::{message_prettify, DEFAULT_COMMENT_CHAR}; +pub use note::{Note, Notes}; +pub use object::Object; +pub use oid::Oid; +pub use packbuilder::{PackBuilder, PackBuilderStage}; +pub use pathspec::{Pathspec, PathspecMatchList, PathspecFailedEntries}; +pub use pathspec::{PathspecDiffEntries, PathspecEntries}; +pub use patch::Patch; +pub use proxy_options::ProxyOptions; +pub use reference::{Reference, References, ReferenceNames}; +pub use reflog::{Reflog, ReflogEntry, ReflogIter}; +pub use refspec::Refspec; +pub use remote::{Remote, Refspecs, RemoteHead, FetchOptions, PushOptions}; +pub use remote_callbacks::{RemoteCallbacks, Credentials, TransferProgress}; +pub use 
remote_callbacks::{TransportMessage, Progress, UpdateTips}; +pub use repo::{Repository, RepositoryInitOptions}; +pub use revspec::Revspec; +pub use revwalk::Revwalk; +pub use signature::Signature; +pub use status::{StatusOptions, Statuses, StatusIter, StatusEntry, StatusShow}; +pub use stash::{StashApplyOptions, StashCb, StashApplyProgressCb}; +pub use submodule::Submodule; +pub use tag::Tag; +pub use time::{Time, IndexTime}; +pub use tree::{Tree, TreeEntry, TreeIter}; +pub use treebuilder::TreeBuilder; +pub use util::IntoCString; + +/// An enumeration of possible errors that can happen when working with a git +/// repository. +#[derive(PartialEq, Eq, Clone, Debug, Copy)] +pub enum ErrorCode { + /// Generic error + GenericError, + /// Requested object could not be found + NotFound, + /// Object exists preventing operation + Exists, + /// More than one object matches + Ambiguous, + /// Output buffer too short to hold data + BufSize, + /// User-generated error + User, + /// Operation not allowed on bare repository + BareRepo, + /// HEAD refers to branch with no commits + UnbornBranch, + /// Merge in progress prevented operation + Unmerged, + /// Reference was not fast-forwardable + NotFastForward, + /// Name/ref spec was not in a valid format + InvalidSpec, + /// Checkout conflicts prevented operation + Conflict, + /// Lock file prevented operation + Locked, + /// Reference value does not match expected + Modified, + /// Authentication error + Auth, + /// Server certificate is invalid + Certificate, + /// Patch/merge has already been applied + Applied, + /// The requested peel operation is not possible + Peel, + /// Unexpected EOF + Eof, + /// Invalid operation or input + Invalid, + /// Uncommitted changes in index prevented operation + Uncommitted, + /// Operation was not valid for a directory, + Directory, +} + +/// An enumeration of possible categories of things that can have +/// errors when working with a git repository. 
+#[derive(PartialEq, Eq, Clone, Debug, Copy)] +pub enum ErrorClass { + /// Uncategorized + None, + /// Out of memory or insufficient allocated space + NoMemory, + /// Syscall or standard system library error + Os, + /// Invalid input + Invalid, + /// Error resolving or manipulating a reference + Reference, + /// ZLib failure + Zlib, + /// Bad repository state + Repository, + /// Bad configuration + Config, + /// Regex failure + Regex, + /// Bad object + Odb, + /// Invalid index data + Index, + /// Error creating or obtaining an object + Object, + /// Network error + Net, + /// Error manpulating a tag + Tag, + /// Invalid value in tree + Tree, + /// Hashing or packing error + Indexer, + /// Error from SSL + Ssl, + /// Error involing submodules + Submodule, + /// Threading error + Thread, + /// Error manipulating a stash + Stash, + /// Checkout failure + Checkout, + /// Invalid FETCH_HEAD + FetchHead, + /// Merge failure + Merge, + /// SSH failure + Ssh, + /// Error manipulating filters + Filter, + /// Error reverting commit + Revert, + /// Error from a user callback + Callback, + /// Error cherry-picking commit + CherryPick, + /// Can't describe object + Describe, + /// Error during rebase + Rebase, + /// Filesystem-related error + Filesystem, +} + +/// A listing of the possible states that a repository can be in. +#[derive(PartialEq, Eq, Clone, Debug, Copy)] +#[allow(missing_docs)] +pub enum RepositoryState { + Clean, + Merge, + Revert, + RevertSequence, + CherryPick, + CherryPickSequence, + Bisect, + Rebase, + RebaseInteractive, + RebaseMerge, + ApplyMailbox, + ApplyMailboxOrRebase, +} + +/// An enumeration of the possible directions for a remote. +#[derive(Copy, Clone)] +pub enum Direction { + /// Data will be fetched (read) from this remote. + Fetch, + /// Data will be pushed (written) to this remote. + Push, +} + +/// An enumeration of the operations that can be performed for the `reset` +/// method on a `Repository`. 
+#[derive(Copy, Clone)] +pub enum ResetType { + /// Move the head to the given commit. + Soft, + /// Soft plus reset the index to the commit. + Mixed, + /// Mixed plus changes in the working tree are discarded. + Hard, +} + +/// An enumeration all possible kinds objects may have. +#[derive(PartialEq, Eq, Copy, Clone, Debug)] +pub enum ObjectType { + /// Any kind of git object + Any, + /// An object which corresponds to a git commit + Commit, + /// An object which corresponds to a git tree + Tree, + /// An object which corresponds to a git blob + Blob, + /// An object which corresponds to a git tag + Tag, +} + +/// An enumeration for the possible types of branches +#[derive(PartialEq, Eq, Debug, Copy, Clone)] +pub enum BranchType { + /// A local branch not on a remote. + Local, + /// A branch for a remote. + Remote, +} + +/// An enumeration of the possible priority levels of a config file. +/// +/// The levels corresponding to the escalation logic (higher to lower) when +/// searching for config entries. +#[derive(PartialEq, Eq, Debug, Copy, Clone)] +pub enum ConfigLevel { + /// System-wide on Windows, for compatibility with portable git + ProgramData, + /// System-wide configuration file, e.g. /etc/gitconfig + System, + /// XDG-compatible configuration file, e.g. ~/.config/git/config + XDG, + /// User-specific configuration, e.g. ~/.gitconfig + Global, + /// Repository specific config, e.g. $PWD/.git/config + Local, + /// Application specific configuration file + App, + /// Highest level available + Highest, +} + +/// Merge file favor options for `MergeOptions` instruct the file-level +/// merging functionality how to deal with conflicting regions of the files. +#[derive(PartialEq, Eq, Debug, Copy, Clone)] +pub enum FileFavor { + /// When a region of a file is changed in both branches, a conflict will be + /// recorded in the index so that git_checkout can produce a merge file with + /// conflict markers in the working directory. This is the default. 
+ Normal, + /// When a region of a file is changed in both branches, the file created + /// in the index will contain the "ours" side of any conflicting region. + /// The index will not record a conflict. + Ours, + /// When a region of a file is changed in both branches, the file created + /// in the index will contain the "theirs" side of any conflicting region. + /// The index will not record a conflict. + Theirs, + /// When a region of a file is changed in both branches, the file created + /// in the index will contain each unique line from each side, which has + /// the result of combining both files. The index will not record a conflict. + Union, +} + +bitflags! { + /// Orderings that may be specified for Revwalk iteration. + pub flags Sort: u32 { + /// Sort the repository contents in no particular ordering. + /// + /// This sorting is arbitrary, implementation-specific, and subject to + /// change at any time. This is the default sorting for new walkers. + const SORT_NONE = raw::GIT_SORT_NONE as u32, + + /// Sort the repository contents in topological order (parents before + /// children). + /// + /// This sorting mode can be combined with time sorting. + const SORT_TOPOLOGICAL = raw::GIT_SORT_TOPOLOGICAL as u32, + + /// Sort the repository contents by commit time. + /// + /// This sorting mode can be combined with topological sorting. + const SORT_TIME = raw::GIT_SORT_TIME as u32, + + /// Iterate through the repository contents in reverse order. + /// + /// This sorting mode can be combined with any others. + const SORT_REVERSE = raw::GIT_SORT_REVERSE as u32, + } +} + +bitflags! { + /// Types of credentials that can be requested by a credential callback. 
+ pub flags CredentialType: u32 { + #[allow(missing_docs)] + const USER_PASS_PLAINTEXT = raw::GIT_CREDTYPE_USERPASS_PLAINTEXT as u32, + #[allow(missing_docs)] + const SSH_KEY = raw::GIT_CREDTYPE_SSH_KEY as u32, + #[allow(missing_docs)] + const SSH_MEMORY = raw::GIT_CREDTYPE_SSH_MEMORY as u32, + #[allow(missing_docs)] + const SSH_CUSTOM = raw::GIT_CREDTYPE_SSH_CUSTOM as u32, + #[allow(missing_docs)] + const DEFAULT = raw::GIT_CREDTYPE_DEFAULT as u32, + #[allow(missing_docs)] + const SSH_INTERACTIVE = raw::GIT_CREDTYPE_SSH_INTERACTIVE as u32, + #[allow(missing_docs)] + const USERNAME = raw::GIT_CREDTYPE_USERNAME as u32, + } +} + +bitflags! { + /// Flags for the `flags` field of an IndexEntry. + pub flags IndexEntryFlag: u16 { + /// Set when the `extended_flags` field is valid. + const IDXENTRY_EXTENDED = raw::GIT_IDXENTRY_EXTENDED as u16, + /// "Assume valid" flag + const IDXENTRY_VALID = raw::GIT_IDXENTRY_VALID as u16, + } +} + +bitflags! { + /// Flags for the `extended_flags` field of an IndexEntry. 
+ pub flags IndexEntryExtendedFlag: u16 { + /// An "intent to add" entry from "git add -N" + const IDXENTRY_INTENT_TO_ADD = raw::GIT_IDXENTRY_INTENT_TO_ADD as u16, + /// Skip the associated worktree file, for sparse checkouts + const IDXENTRY_SKIP_WORKTREE = raw::GIT_IDXENTRY_SKIP_WORKTREE as u16, + /// Reserved for a future on-disk extended flag + const IDXENTRY_EXTENDED2 = raw::GIT_IDXENTRY_EXTENDED2 as u16, + + #[allow(missing_docs)] + const IDXENTRY_UPDATE = raw::GIT_IDXENTRY_UPDATE as u16, + #[allow(missing_docs)] + const IDXENTRY_REMOVE = raw::GIT_IDXENTRY_REMOVE as u16, + #[allow(missing_docs)] + const IDXENTRY_UPTODATE = raw::GIT_IDXENTRY_UPTODATE as u16, + #[allow(missing_docs)] + const IDXENTRY_ADDED = raw::GIT_IDXENTRY_ADDED as u16, + + #[allow(missing_docs)] + const IDXENTRY_HASHED = raw::GIT_IDXENTRY_HASHED as u16, + #[allow(missing_docs)] + const IDXENTRY_UNHASHED = raw::GIT_IDXENTRY_UNHASHED as u16, + #[allow(missing_docs)] + const IDXENTRY_WT_REMOVE = raw::GIT_IDXENTRY_WT_REMOVE as u16, + #[allow(missing_docs)] + const IDXENTRY_CONFLICTED = raw::GIT_IDXENTRY_CONFLICTED as u16, + + #[allow(missing_docs)] + const IDXENTRY_UNPACKED = raw::GIT_IDXENTRY_UNPACKED as u16, + #[allow(missing_docs)] + const IDXENTRY_NEW_SKIP_WORKTREE = raw::GIT_IDXENTRY_NEW_SKIP_WORKTREE as u16, + } +} + +bitflags! { + /// Flags for APIs that add files matching pathspec + pub flags IndexAddOption: u32 { + #[allow(missing_docs)] + const ADD_DEFAULT = raw::GIT_INDEX_ADD_DEFAULT as u32, + #[allow(missing_docs)] + const ADD_FORCE = raw::GIT_INDEX_ADD_FORCE as u32, + #[allow(missing_docs)] + const ADD_DISABLE_PATHSPEC_MATCH = + raw::GIT_INDEX_ADD_DISABLE_PATHSPEC_MATCH as u32, + #[allow(missing_docs)] + const ADD_CHECK_PATHSPEC = raw::GIT_INDEX_ADD_CHECK_PATHSPEC as u32, + } +} + +bitflags! { + /// Flags for `Repository::open_ext` + pub flags RepositoryOpenFlags: u32 { + /// Only open the specified path; don't walk upward searching. 
+ const REPOSITORY_OPEN_NO_SEARCH = raw::GIT_REPOSITORY_OPEN_NO_SEARCH as u32, + /// Search across filesystem boundaries. + const REPOSITORY_OPEN_CROSS_FS = raw::GIT_REPOSITORY_OPEN_CROSS_FS as u32, + /// Force opening as bare repository, and defer loading its config. + const REPOSITORY_OPEN_BARE = raw::GIT_REPOSITORY_OPEN_BARE as u32, + /// Don't try appending `/.git` to the specified repository path. + const REPOSITORY_OPEN_NO_DOTGIT = raw::GIT_REPOSITORY_OPEN_NO_DOTGIT as u32, + /// Respect environment variables like `$GIT_DIR`. + const REPOSITORY_OPEN_FROM_ENV = raw::GIT_REPOSITORY_OPEN_FROM_ENV as u32, + } +} + +bitflags! { + /// Flags for the return value of `Repository::revparse` + pub flags RevparseMode: u32 { + /// The spec targeted a single object + const REVPARSE_SINGLE = raw::GIT_REVPARSE_SINGLE as u32, + /// The spec targeted a range of commits + const REVPARSE_RANGE = raw::GIT_REVPARSE_RANGE as u32, + /// The spec used the `...` operator, which invokes special semantics. + const REVPARSE_MERGE_BASE = raw::GIT_REVPARSE_MERGE_BASE as u32, + } +} + +#[cfg(test)] #[macro_use] mod test; +#[macro_use] mod panic; +mod call; +mod util; + +pub mod build; +pub mod cert; +pub mod string_array; +pub mod oid_array; +pub mod transport; + +mod blame; +mod blob; +mod branch; +mod buf; +mod commit; +mod config; +mod cred; +mod describe; +mod diff; +mod error; +mod index; +mod merge; +mod message; +mod note; +mod object; +mod oid; +mod packbuilder; +mod pathspec; +mod patch; +mod proxy_options; +mod reference; +mod reflog; +mod refspec; +mod remote; +mod remote_callbacks; +mod repo; +mod revspec; +mod revwalk; +mod signature; +mod status; +mod submodule; +mod stash; +mod tag; +mod time; +mod tree; +mod treebuilder; + +fn init() { + static INIT: Once = ONCE_INIT; + + INIT.call_once(|| { + openssl_env_init(); + }); + + raw::init(); +} + +#[cfg(all(unix, not(target_os = "macos"), not(target_os = "ios"), feature = "https"))] +fn openssl_env_init() { + extern crate 
openssl_probe; + + // Currently, libgit2 leverages OpenSSL for SSL support when cloning + // repositories over HTTPS. This means that we're picking up an OpenSSL + // dependency on non-Windows platforms (where it has its own HTTPS + // subsystem). As a result, we need to link to OpenSSL. + // + // Now actually *linking* to OpenSSL isn't so hard. We just need to make + // sure to use pkg-config to discover any relevant system dependencies for + // differences between distributions like CentOS and Ubuntu. The actual + // trickiness comes about when we start *distributing* the resulting + // binaries. Currently Cargo is distributed in binary form as nightlies, + // which means we're distributing a binary with OpenSSL linked in. + // + // For historical reasons, the Linux nightly builder is running a CentOS + // distribution in order to have as much ABI compatibility with other + // distributions as possible. Sadly, however, this compatibility does not + // extend to OpenSSL. Currently OpenSSL has two major versions, 0.9 and 1.0, + // which are incompatible (many ABI differences). The CentOS builder we + // build on has version 1.0, as do most distributions today. Some still have + // 0.9, however. This means that if we are to distribute the binaries built + // by the CentOS machine, we would only be compatible with OpenSSL 1.0 and + // we would fail to run (a dynamic linker error at runtime) on systems with + // only 9.8 installed (hopefully). + // + // But wait, the plot thickens! Apparently CentOS has dubbed their OpenSSL + // library as `libssl.so.10`, notably the `10` is included at the end. On + // the other hand Ubuntu, for example, only distributes `libssl.so`. This + // means that the binaries created at CentOS are hard-wired to probe for a + // file called `libssl.so.10` at runtime (using the LD_LIBRARY_PATH), which + // will not be found on ubuntu. 
The conclusion of this is that binaries + // built on CentOS cannot be distributed to Ubuntu and run successfully. + // + // There are a number of sneaky things we could do, including, but not + // limited to: + // + // 1. Create a shim program which runs "just before" cargo runs. The + // responsibility of this shim program would be to locate `libssl.so`, + // whatever it's called, on the current system, make sure there's a + // symlink *somewhere* called `libssl.so.10`, and then set up + // LD_LIBRARY_PATH and run the actual cargo. + // + // This approach definitely seems unconventional, and is borderline + // overkill for this problem. It's also dubious if we can find a + // libssl.so reliably on the target system. + // + // 2. Somehow re-work the CentOS installation so that the linked-against + // library is called libssl.so instead of libssl.so.10 + // + // The problem with this approach is that systems with 0.9 installed will + // start to silently fail, due to also having libraries called libssl.so + // (probably symlinked under a more appropriate version). + // + // 3. Compile Cargo against both OpenSSL 1.0 *and* OpenSSL 0.9, and + // distribute both. Also make sure that the linked-against name of the + // library is `libssl.so`. At runtime we determine which version is + // installed, and we then the appropriate binary. + // + // This approach clearly has drawbacks in terms of infrastructure and + // feasibility. + // + // 4. Build a nightly of Cargo for each distribution we'd like to support. + // You would then pick the appropriate Cargo nightly to install locally. + // + // So, with all this in mind, the decision was made to *statically* link + // OpenSSL. This solves any problem of relying on a downstream OpenSSL + // version being available. This does, however, open a can of worms related + // to security issues. 
It's generally a good idea to dynamically link + // OpenSSL as you'll get security updates over time without having to do + // anything (the system administrator will update the local openssl + // package). By statically linking, we're forfeiting this feature. + // + // The conclusion was made it is likely appropriate for the Cargo nightlies + // to statically link OpenSSL, but highly encourage distributions and + // packagers of Cargo to dynamically link OpenSSL. Packagers are targeting + // one system and are distributing to only that system, so none of the + // problems mentioned above would arise. + // + // In order to support this, a new package was made: openssl-static-sys. + // This package currently performs a fairly simple task: + // + // 1. Run pkg-config to discover where openssl is installed. + // 2. If openssl is installed in a nonstandard location, *and* static copies + // of the libraries are available, copy them to $OUT_DIR. + // + // This library will bring in libssl.a and libcrypto.a into the local build, + // allowing them to be picked up by this crate. This allows us to configure + // our own buildbots to have pkg-config point to these local pre-built + // copies of a static OpenSSL (with very few dependencies) while allowing + // most other builds of Cargo to naturally dynamically link OpenSSL. + // + // So in summary, if you're with me so far, we've statically linked OpenSSL + // to the Cargo binary (or any binary, for that matter) and we're ready to + // distribute it to *all* linux distributions. Remember that our original + // intent for openssl was for HTTPS support, which implies that we need some + // for of CA certificate store to validate certificates. This is normally + // installed in a standard system location. + // + // Unfortunately, as one might imagine, OpenSSL is configured for where this + // standard location is at *build time*, but it often varies widely + // per-system. 
Consequently, it was discovered that OpenSSL will respect the + // SSL_CERT_FILE and SSL_CERT_DIR environment variables in order to assist + // in discovering the location of this file (hurray!). + // + // So, finally getting to the point, this function solely exists to support + // our static builds of OpenSSL by probing for the "standard system + // location" of certificates and setting relevant environment variable to + // point to them. + // + // Ah, and as a final note, this is only a problem on Linux, not on OS X. On + // OS X the OpenSSL binaries are stable enough that we can just rely on + // dynamic linkage (plus they have some weird modifications to OpenSSL which + // means we wouldn't want to link statically). + openssl_probe::init_ssl_cert_env_vars(); +} + +#[cfg(any(windows, target_os = "macos", target_os = "ios", not(feature = "https")))] +fn openssl_env_init() {} + +unsafe fn opt_bytes<'a, T>(_anchor: &'a T, + c: *const libc::c_char) -> Option<&'a [u8]> { + if c.is_null() { + None + } else { + Some(CStr::from_ptr(c).to_bytes()) + } +} + +fn opt_cstr(o: Option) -> Result, Error> { + match o { + Some(s) => s.into_c_string().map(Some), + None => Ok(None) + } +} + +impl ObjectType { + /// Convert an object type to its string representation. + pub fn str(&self) -> &'static str { + unsafe { + let ptr = call!(raw::git_object_type2string(*self)) as *const _; + let data = CStr::from_ptr(ptr).to_bytes(); + str::from_utf8(data).unwrap() + } + } + + /// Determine if the given git_otype is a valid loose object type. 
+ pub fn is_loose(&self) -> bool { + unsafe { (call!(raw::git_object_typeisloose(*self)) == 1) } + } + + /// Convert a raw git_otype to an ObjectType + pub fn from_raw(raw: raw::git_otype) -> Option { + match raw { + raw::GIT_OBJ_ANY => Some(ObjectType::Any), + raw::GIT_OBJ_COMMIT => Some(ObjectType::Commit), + raw::GIT_OBJ_TREE => Some(ObjectType::Tree), + raw::GIT_OBJ_BLOB => Some(ObjectType::Blob), + raw::GIT_OBJ_TAG => Some(ObjectType::Tag), + _ => None, + } + } + + /// Convert this kind into its raw representation + pub fn raw(&self) -> raw::git_otype { + call::convert(self) + } + + /// Convert a string object type representation to its object type. + pub fn from_str(s: &str) -> Option { + let raw = unsafe { call!(raw::git_object_string2type(CString::new(s).unwrap())) }; + ObjectType::from_raw(raw) + } +} + +impl fmt::Display for ObjectType { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + self.str().fmt(f) + } +} + +impl ConfigLevel { + /// Converts a raw configuration level to a ConfigLevel + pub fn from_raw(raw: raw::git_config_level_t) -> ConfigLevel { + match raw { + raw::GIT_CONFIG_LEVEL_PROGRAMDATA => ConfigLevel::ProgramData, + raw::GIT_CONFIG_LEVEL_SYSTEM => ConfigLevel::System, + raw::GIT_CONFIG_LEVEL_XDG => ConfigLevel::XDG, + raw::GIT_CONFIG_LEVEL_GLOBAL => ConfigLevel::Global, + raw::GIT_CONFIG_LEVEL_LOCAL => ConfigLevel::Local, + raw::GIT_CONFIG_LEVEL_APP => ConfigLevel::App, + raw::GIT_CONFIG_HIGHEST_LEVEL => ConfigLevel::Highest, + n => panic!("unknown config level: {}", n), + } + } +} + +bitflags! { + /// Status flags for a single file + /// + /// A combination of these values will be returned to indicate the status of + /// a file. Status compares the working directory, the index, and the + /// current HEAD of the repository. 
The `STATUS_INDEX_*` set of flags + /// represents the status of file in the index relative to the HEAD, and the + /// `STATUS_WT_*` set of flags represent the status of the file in the + /// working directory relative to the index. + pub flags Status: u32 { + #[allow(missing_docs)] + const STATUS_CURRENT = raw::GIT_STATUS_CURRENT as u32, + + #[allow(missing_docs)] + const STATUS_INDEX_NEW = raw::GIT_STATUS_INDEX_NEW as u32, + #[allow(missing_docs)] + const STATUS_INDEX_MODIFIED = raw::GIT_STATUS_INDEX_MODIFIED as u32, + #[allow(missing_docs)] + const STATUS_INDEX_DELETED = raw::GIT_STATUS_INDEX_DELETED as u32, + #[allow(missing_docs)] + const STATUS_INDEX_RENAMED = raw::GIT_STATUS_INDEX_RENAMED as u32, + #[allow(missing_docs)] + const STATUS_INDEX_TYPECHANGE = raw::GIT_STATUS_INDEX_TYPECHANGE as u32, + + #[allow(missing_docs)] + const STATUS_WT_NEW = raw::GIT_STATUS_WT_NEW as u32, + #[allow(missing_docs)] + const STATUS_WT_MODIFIED = raw::GIT_STATUS_WT_MODIFIED as u32, + #[allow(missing_docs)] + const STATUS_WT_DELETED = raw::GIT_STATUS_WT_DELETED as u32, + #[allow(missing_docs)] + const STATUS_WT_TYPECHANGE = raw::GIT_STATUS_WT_TYPECHANGE as u32, + #[allow(missing_docs)] + const STATUS_WT_RENAMED = raw::GIT_STATUS_WT_RENAMED as u32, + + #[allow(missing_docs)] + const STATUS_IGNORED = raw::GIT_STATUS_IGNORED as u32, + #[allow(missing_docs)] + const STATUS_CONFLICTED = raw::GIT_STATUS_CONFLICTED as u32, + } +} + +bitflags! { + /// Mode options for RepositoryInitOptions + pub flags RepositoryInitMode: u32 { + /// Use permissions configured by umask - the default + const REPOSITORY_INIT_SHARED_UMASK = + raw::GIT_REPOSITORY_INIT_SHARED_UMASK as u32, + /// Use `--shared=group` behavior, chmod'ing the new repo to be + /// group writable and \"g+sx\" for sticky group assignment + const REPOSITORY_INIT_SHARED_GROUP = + raw::GIT_REPOSITORY_INIT_SHARED_GROUP as u32, + /// Use `--shared=all` behavior, adding world readability. 
+ const REPOSITORY_INIT_SHARED_ALL = + raw::GIT_REPOSITORY_INIT_SHARED_ALL as u32, + } +} + +/// What type of change is described by a `DiffDelta`? +#[derive(Copy, Clone, Debug, PartialEq, Eq)] +pub enum Delta { + /// No changes + Unmodified, + /// Entry does not exist in old version + Added, + /// Entry does not exist in new version + Deleted, + /// Entry content changed between old and new + Modified, + /// Entry was renamed wbetween old and new + Renamed, + /// Entry was copied from another old entry + Copied, + /// Entry is ignored item in workdir + Ignored, + /// Entry is untracked item in workdir + Untracked, + /// Type of entry changed between old and new + Typechange, + /// Entry is unreadable + Unreadable, + /// Entry in the index is conflicted + Conflicted, +} + +bitflags! { + /// Return codes for submodule status. + /// + /// A combination of these flags will be returned to describe the status of a + /// submodule. Depending on the "ignore" property of the submodule, some of + /// the flags may never be returned because they indicate changes that are + /// supposed to be ignored. + /// + /// Submodule info is contained in 4 places: the HEAD tree, the index, config + /// files (both .git/config and .gitmodules), and the working directory. Any + /// or all of those places might be missing information about the submodule + /// depending on what state the repo is in. We consider all four places to + /// build the combination of status flags. + /// + /// There are four values that are not really status, but give basic info + /// about what sources of submodule data are available. These will be + /// returned even if ignore is set to "ALL". + /// + /// * IN_HEAD - superproject head contains submodule + /// * IN_INDEX - superproject index contains submodule + /// * IN_CONFIG - superproject gitmodules has submodule + /// * IN_WD - superproject workdir has submodule + /// + /// The following values will be returned so long as ignore is not "ALL". 
+ /// + /// * INDEX_ADDED - in index, not in head + /// * INDEX_DELETED - in head, not in index + /// * INDEX_MODIFIED - index and head don't match + /// * WD_UNINITIALIZED - workdir contains empty directory + /// * WD_ADDED - in workdir, not index + /// * WD_DELETED - in index, not workdir + /// * WD_MODIFIED - index and workdir head don't match + /// + /// The following can only be returned if ignore is "NONE" or "UNTRACKED". + /// + /// * WD_INDEX_MODIFIED - submodule workdir index is dirty + /// * WD_WD_MODIFIED - submodule workdir has modified files + /// + /// Lastly, the following will only be returned for ignore "NONE". + /// + /// * WD_UNTRACKED - wd contains untracked files + pub flags SubmoduleStatus: u32 { + #[allow(missing_docs)] + const SUBMODULE_STATUS_IN_HEAD = + raw::GIT_SUBMODULE_STATUS_IN_HEAD as u32, + #[allow(missing_docs)] + const SUBMODULE_STATUS_IN_INDEX = + raw::GIT_SUBMODULE_STATUS_IN_INDEX as u32, + #[allow(missing_docs)] + const SUBMODULE_STATUS_IN_CONFIG = + raw::GIT_SUBMODULE_STATUS_IN_CONFIG as u32, + #[allow(missing_docs)] + const SUBMODULE_STATUS_IN_WD = + raw::GIT_SUBMODULE_STATUS_IN_WD as u32, + #[allow(missing_docs)] + const SUBMODULE_STATUS_INDEX_ADDED = + raw::GIT_SUBMODULE_STATUS_INDEX_ADDED as u32, + #[allow(missing_docs)] + const SUBMODULE_STATUS_INDEX_DELETED = + raw::GIT_SUBMODULE_STATUS_INDEX_DELETED as u32, + #[allow(missing_docs)] + const SUBMODULE_STATUS_INDEX_MODIFIED = + raw::GIT_SUBMODULE_STATUS_INDEX_MODIFIED as u32, + #[allow(missing_docs)] + const SUBMODULE_STATUS_WD_UNINITIALIZED = + raw::GIT_SUBMODULE_STATUS_WD_UNINITIALIZED as u32, + #[allow(missing_docs)] + const SUBMODULE_STATUS_WD_ADDED = + raw::GIT_SUBMODULE_STATUS_WD_ADDED as u32, + #[allow(missing_docs)] + const SUBMODULE_STATUS_WD_DELETED = + raw::GIT_SUBMODULE_STATUS_WD_DELETED as u32, + #[allow(missing_docs)] + const SUBMODULE_STATUS_WD_MODIFIED = + raw::GIT_SUBMODULE_STATUS_WD_MODIFIED as u32, + #[allow(missing_docs)] + const 
SUBMODULE_STATUS_WD_INDEX_MODIFIED = + raw::GIT_SUBMODULE_STATUS_WD_INDEX_MODIFIED as u32, + #[allow(missing_docs)] + const SUBMODULE_STATUS_WD_WD_MODIFIED = + raw::GIT_SUBMODULE_STATUS_WD_WD_MODIFIED as u32, + #[allow(missing_docs)] + const SUBMODULE_STATUS_WD_UNTRACKED = + raw::GIT_SUBMODULE_STATUS_WD_UNTRACKED as u32, + } + +} + +/// Submodule ignore values +/// +/// These values represent settings for the `submodule.$name.ignore` +/// configuration value which says how deeply to look at the working +/// directory when getting the submodule status. +pub enum SubmoduleIgnore { + /// Use the submodule's configuration + Unspecified, + /// Any change or untracked file is considered dirty + None, + /// Only dirty if tracked files have changed + Untracked, + /// Only dirty if HEAD has moved + Dirty, + /// Never dirty + All, +} + +bitflags! { + /// ... + pub flags PathspecFlags: u32 { + /// Use the default pathspec matching configuration. + const PATHSPEC_DEFAULT = raw::GIT_PATHSPEC_DEFAULT as u32, + /// Force matching to ignore case, otherwise matching will use native + /// case sensitivity fo the platform filesystem. + const PATHSPEC_IGNORE_CASE = raw::GIT_PATHSPEC_IGNORE_CASE as u32, + /// Force case sensitive matches, otherwise match will use the native + /// case sensitivity of the platform filesystem. + const PATHSPEC_USE_CASE = raw::GIT_PATHSPEC_USE_CASE as u32, + /// Disable glob patterns and just use simple string comparison for + /// matching. + const PATHSPEC_NO_GLOB = raw::GIT_PATHSPEC_NO_GLOB as u32, + /// Means that match functions return the error code `NotFound` if no + /// matches are found. By default no matches is a success. + const PATHSPEC_NO_MATCH_ERROR = raw::GIT_PATHSPEC_NO_MATCH_ERROR as u32, + /// Means that the list returned should track which patterns matched + /// which files so that at the end of the match we can identify patterns + /// that did not match any files. 
+ const PATHSPEC_FIND_FAILURES = raw::GIT_PATHSPEC_FIND_FAILURES as u32, + /// Means that the list returned does not need to keep the actual + /// matching filenames. Use this to just test if there were any matches + /// at all or in combination with `PATHSPEC_FAILURES` to validate a + /// pathspec. + const PATHSPEC_FAILURES_ONLY = raw::GIT_PATHSPEC_FAILURES_ONLY as u32, + } +} + +bitflags! { + /// Types of notifications emitted from checkouts. + pub flags CheckoutNotificationType: u32 { + /// Notification about a conflict. + const CHECKOUT_NOTIFICATION_CONFLICT = raw::GIT_CHECKOUT_NOTIFY_CONFLICT as u32, + /// Notification about a dirty file. + const CHECKOUT_NOTIFICATION_DIRTY = raw::GIT_CHECKOUT_NOTIFY_DIRTY as u32, + /// Notification about an updated file. + const CHECKOUT_NOTIFICATION_UPDATED = raw::GIT_CHECKOUT_NOTIFY_UPDATED as u32, + /// Notification about an untracked file. + const CHECKOUT_NOTIFICATION_UNTRACKED = raw::GIT_CHECKOUT_NOTIFY_UNTRACKED as u32, + /// Notification about an ignored file. + const CHECKOUT_NOTIFICATION_IGNORED = raw::GIT_CHECKOUT_NOTIFY_IGNORED as u32, + } +} + +/// Possible output formats for diff data +#[derive(Copy, Clone)] +pub enum DiffFormat { + /// full git diff + Patch, + /// just the headers of the patch + PatchHeader, + /// like git diff --raw + Raw, + /// like git diff --name-only + NameOnly, + /// like git diff --name-status + NameStatus, +} + +bitflags! 
{ + /// Formatting options for diff stats + pub flags DiffStatsFormat: raw::git_diff_stats_format_t { + /// Don't generate any stats + const DIFF_STATS_NONE = raw::GIT_DIFF_STATS_NONE, + /// Equivalent of `--stat` in git + const DIFF_STATS_FULL = raw::GIT_DIFF_STATS_FULL, + /// Equivalent of `--shortstat` in git + const DIFF_STATS_SHORT = raw::GIT_DIFF_STATS_SHORT, + /// Equivalent of `--numstat` in git + const DIFF_STATS_NUMBER = raw::GIT_DIFF_STATS_NUMBER, + /// Extended header information such as creations, renames and mode + /// changes, equivalent of `--summary` in git + const DIFF_STATS_INCLUDE_SUMMARY = + raw::GIT_DIFF_STATS_INCLUDE_SUMMARY, + } +} + +/// Automatic tag following options. +pub enum AutotagOption { + /// Use the setting from the remote's configuration + Unspecified, + /// Ask the server for tags pointing to objects we're already downloading + Auto, + /// Don't ask for any tags beyond the refspecs + None, + /// Ask for all the tags + All, +} + +/// Configuration for how pruning is done on a fetch +pub enum FetchPrune { + /// Use the setting from the configuration + Unspecified, + /// Force pruning on + On, + /// Force pruning off + Off, +} + +#[allow(missing_docs)] +#[derive(Debug)] +pub enum StashApplyProgress { + /// None + None, + /// Loading the stashed data from the object database + LoadingStash, + /// The stored index is being analyzed + AnalyzeIndex, + /// The modified files are being analyzed + AnalyzeModified, + /// The untracked and ignored files are being analyzed + AnalyzeUntracked, + /// The untracked files are being written to disk + CheckoutUntracked, + /// The modified files are being written to disk + CheckoutModified, + /// The stash was applied successfully + Done, +} + +bitflags! 
{ + #[allow(missing_docs)] + pub flags StashApplyFlags: u32 { + #[allow(missing_docs)] + const STASH_APPLY_DEFAULT = raw::GIT_STASH_APPLY_DEFAULT as u32, + /// Try to reinstate not only the working tree's changes, + /// but also the index's changes. + const STASH_APPLY_REINSTATE_INDEX = raw::GIT_STASH_APPLY_REINSTATE_INDEX as u32, + } +} + +bitflags! { + #[allow(missing_docs)] + pub flags StashFlags: u32 { + #[allow(missing_docs)] + const STASH_DEFAULT = raw::GIT_STASH_DEFAULT as u32, + /// All changes already added to the index are left intact in + /// the working directory + const STASH_KEEP_INDEX = raw::GIT_STASH_KEEP_INDEX as u32, + /// All untracked files are also stashed and then cleaned up + /// from the working directory + const STASH_INCLUDE_UNTRACKED = raw::GIT_STASH_INCLUDE_UNTRACKED as u32, + /// All ignored files are also stashed and then cleaned up from + /// the working directory + const STASH_INCLUDE_IGNORED = raw::GIT_STASH_INCLUDE_IGNORED as u32, + } +} + +#[cfg(test)] +mod tests { + use super::ObjectType; + + #[test] + fn convert() { + assert_eq!(ObjectType::Blob.str(), "blob"); + assert_eq!(ObjectType::from_str("blob"), Some(ObjectType::Blob)); + assert!(ObjectType::Blob.is_loose()); + } + +} diff -Nru cargo-0.17.0/vendor/git2-0.6.4/src/merge.rs cargo-0.19.0/vendor/git2-0.6.4/src/merge.rs --- cargo-0.17.0/vendor/git2-0.6.4/src/merge.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/git2-0.6.4/src/merge.rs 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,154 @@ +use std::marker; +use std::mem; +use libc::c_uint; + +use {raw, Oid, Commit, FileFavor}; +use util::Binding; +use call::Convert; + +/// A structure to represent an annotated commit, the input to merge and rebase. +/// +/// An annotated commit contains information about how it was looked up, which +/// may be useful for functions like merge or rebase to provide context to the +/// operation. 
+pub struct AnnotatedCommit<'repo> { + raw: *mut raw::git_annotated_commit, + _marker: marker::PhantomData>, +} + +/// Options to specify when merging. +pub struct MergeOptions { + raw: raw::git_merge_options, +} + +impl<'repo> AnnotatedCommit<'repo> { + /// Gets the commit ID that the given git_annotated_commit refers to + pub fn id(&self) -> Oid { + unsafe { Binding::from_raw(raw::git_annotated_commit_id(self.raw)) } + } +} + +impl MergeOptions { + /// Creates a default set of merge options. + pub fn new() -> MergeOptions { + let mut opts = MergeOptions { + raw: unsafe { mem::zeroed() }, + }; + assert_eq!(unsafe { + raw::git_merge_init_options(&mut opts.raw, 1) + }, 0); + opts + } + + /// Detect file renames + pub fn find_renames(&mut self, find: bool) -> &mut MergeOptions { + if find { + self.raw.flags |= raw::GIT_MERGE_FIND_RENAMES; + } else { + self.raw.flags &= !raw::GIT_MERGE_FIND_RENAMES; + } + self + } + + /// Similarity to consider a file renamed (default 50) + pub fn rename_threshold(&mut self, thresh: u32) -> &mut MergeOptions { + self.raw.rename_threshold = thresh; + self + } + + /// Maximum similarity sources to examine for renames (default 200). + /// If the number of rename candidates (add / delete pairs) is greater + /// than this value, inexact rename detection is aborted. This setting + /// overrides the `merge.renameLimit` configuration value. + pub fn target_limit(&mut self, limit: u32) -> &mut MergeOptions { + self.raw.target_limit = limit as c_uint; + self + } + + /// Maximum number of times to merge common ancestors to build a + /// virtual merge base when faced with criss-cross merges. When + /// this limit is reached, the next ancestor will simply be used + /// instead of attempting to merge it. The default is unlimited. 
+ pub fn recursion_limit(&mut self, limit: u32) -> &mut MergeOptions { + self.raw.recursion_limit = limit as c_uint; + self + } + + /// Specify a side to favor for resolving conflicts + pub fn file_favor(&mut self, favor: FileFavor) -> &mut MergeOptions { + self.raw.file_favor = favor.convert(); + self + } + + fn flag(&mut self, opt: raw::git_merge_file_flag_t, val: bool) -> &mut MergeOptions { + if val { + self.raw.file_flags |= opt; + } else { + self.raw.file_flags &= !opt; + } + self + } + + /// Create standard conflicted merge files + pub fn standard_style(&mut self, standard: bool) -> &mut MergeOptions { + self.flag(raw::GIT_MERGE_FILE_STYLE_MERGE, standard) + } + + /// Create diff3-style file + pub fn diff3_style(&mut self, diff3: bool) -> &mut MergeOptions { + self.flag(raw::GIT_MERGE_FILE_STYLE_DIFF3, diff3) + } + + /// Condense non-alphanumeric regions for simplified diff file + pub fn simplify_alnum(&mut self, simplify: bool) -> &mut MergeOptions { + self.flag(raw::GIT_MERGE_FILE_SIMPLIFY_ALNUM, simplify) + } + + /// Ignore all whitespace + pub fn ignore_whitespace(&mut self, ignore: bool) -> &mut MergeOptions { + self.flag(raw::GIT_MERGE_FILE_IGNORE_WHITESPACE, ignore) + } + + /// Ignore changes in amount of whitespace + pub fn ignore_whitespace_change(&mut self, ignore: bool) -> &mut MergeOptions { + self.flag(raw::GIT_MERGE_FILE_IGNORE_WHITESPACE_CHANGE, ignore) + } + + /// Ignore whitespace at end of line + pub fn ignore_whitespace_eol(&mut self, ignore: bool) -> &mut MergeOptions { + self.flag(raw::GIT_MERGE_FILE_IGNORE_WHITESPACE_EOL, ignore) + } + + /// Use the "patience diff" algorithm + pub fn patience(&mut self, patience: bool) -> &mut MergeOptions { + self.flag(raw::GIT_MERGE_FILE_DIFF_PATIENCE, patience) + } + + /// Take extra time to find minimal diff + pub fn minimal(&mut self, minimal: bool) -> &mut MergeOptions { + self.flag(raw::GIT_MERGE_FILE_DIFF_MINIMAL, minimal) + } + + /// Acquire a pointer to the underlying raw options. 
+ pub unsafe fn raw(&self) -> *const raw::git_merge_options { + &self.raw as *const _ + } +} + +impl<'repo> Binding for AnnotatedCommit<'repo> { + type Raw = *mut raw::git_annotated_commit; + unsafe fn from_raw(raw: *mut raw::git_annotated_commit) + -> AnnotatedCommit<'repo> { + AnnotatedCommit { + raw: raw, + _marker: marker::PhantomData, + } + } + fn raw(&self) -> *mut raw::git_annotated_commit { self.raw } +} + +impl<'repo> Drop for AnnotatedCommit<'repo> { + fn drop(&mut self) { + unsafe { raw::git_annotated_commit_free(self.raw) } + } +} diff -Nru cargo-0.17.0/vendor/git2-0.6.4/src/message.rs cargo-0.19.0/vendor/git2-0.6.4/src/message.rs --- cargo-0.17.0/vendor/git2-0.6.4/src/message.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/git2-0.6.4/src/message.rs 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,52 @@ +use std::ffi::CString; + +use libc::{c_char, c_int}; + +use {raw, Buf, Error, IntoCString}; +use util::Binding; + +/// Clean up a message, removing extraneous whitespace, and ensure that the +/// message ends with a newline. If comment_char is Some, also remove comment +/// lines starting with that character. 
+pub fn message_prettify(message: T, comment_char: Option) + -> Result { + _message_prettify(try!(message.into_c_string()), comment_char) +} + +fn _message_prettify(message: CString, comment_char: Option) + -> Result { + let ret = Buf::new(); + unsafe { + try_call!(raw::git_message_prettify(ret.raw(), message, + comment_char.is_some() as c_int, + comment_char.unwrap_or(0) as c_char)); + } + Ok(ret.as_str().unwrap().to_string()) +} + +/// The default comment character for message_prettify ('#') +pub const DEFAULT_COMMENT_CHAR: Option = Some('#' as u8); + +#[cfg(test)] +mod tests { + use {message_prettify, DEFAULT_COMMENT_CHAR}; + + #[test] + fn prettify() { + // This does not attempt to duplicate the extensive tests for + // git_message_prettify in libgit2, just a few representative values to + // make sure the interface works as expected. + assert_eq!(message_prettify("1\n\n\n2", None).unwrap(), + "1\n\n2\n"); + assert_eq!(message_prettify("1\n\n\n2\n\n\n3", None).unwrap(), + "1\n\n2\n\n3\n"); + assert_eq!(message_prettify("1\n# comment\n# more", None).unwrap(), + "1\n# comment\n# more\n"); + assert_eq!(message_prettify("1\n# comment\n# more", + DEFAULT_COMMENT_CHAR).unwrap(), + "1\n"); + assert_eq!(message_prettify("1\n; comment\n; more", + Some(';' as u8)).unwrap(), + "1\n"); + } +} diff -Nru cargo-0.17.0/vendor/git2-0.6.4/src/note.rs cargo-0.19.0/vendor/git2-0.6.4/src/note.rs --- cargo-0.17.0/vendor/git2-0.6.4/src/note.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/git2-0.6.4/src/note.rs 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,125 @@ +use std::marker; +use std::str; + +use {raw, signature, Signature, Oid, Repository, Error}; +use util::Binding; + +/// A structure representing a [note][note] in git. +/// +/// [note]: http://git-scm.com/blog/2010/08/25/notes.html +pub struct Note<'repo> { + raw: *mut raw::git_note, + + // Hmm, the current libgit2 version does not have this inside of it, but + // perhaps it's a good idea to keep it around? 
Can always remove it later I + // suppose... + _marker: marker::PhantomData<&'repo Repository>, +} + +/// An iterator over all of the notes within a repository. +pub struct Notes<'repo> { + raw: *mut raw::git_note_iterator, + _marker: marker::PhantomData<&'repo Repository>, +} + +impl<'repo> Note<'repo> { + /// Get the note author + pub fn author(&self) -> Signature { + unsafe { + signature::from_raw_const(self, raw::git_note_author(&*self.raw)) + } + } + + /// Get the note committer + pub fn committer(&self) -> Signature { + unsafe { + signature::from_raw_const(self, raw::git_note_committer(&*self.raw)) + } + } + + /// Get the note message, in bytes. + pub fn message_bytes(&self) -> &[u8] { + unsafe { ::opt_bytes(self, raw::git_note_message(&*self.raw)).unwrap() } + } + + /// Get the note message as a string, returning `None` if it is not UTF-8. + pub fn message(&self) -> Option<&str> { + str::from_utf8(self.message_bytes()).ok() + } + + /// Get the note object's id + pub fn id(&self) -> Oid { + unsafe { Binding::from_raw(raw::git_note_id(&*self.raw)) } + } +} + +impl<'repo> Binding for Note<'repo> { + type Raw = *mut raw::git_note; + unsafe fn from_raw(raw: *mut raw::git_note) -> Note<'repo> { + Note { raw: raw, _marker: marker::PhantomData, } + } + fn raw(&self) -> *mut raw::git_note { self.raw } +} + + +impl<'repo> Drop for Note<'repo> { + fn drop(&mut self) { + unsafe { raw::git_note_free(self.raw); } + } +} + +impl<'repo> Binding for Notes<'repo> { + type Raw = *mut raw::git_note_iterator; + unsafe fn from_raw(raw: *mut raw::git_note_iterator) -> Notes<'repo> { + Notes { raw: raw, _marker: marker::PhantomData, } + } + fn raw(&self) -> *mut raw::git_note_iterator { self.raw } +} + +impl<'repo> Iterator for Notes<'repo> { + type Item = Result<(Oid, Oid), Error>; + fn next(&mut self) -> Option> { + let mut note_id = raw::git_oid { id: [0; raw::GIT_OID_RAWSZ] }; + let mut annotated_id = note_id; + unsafe { + try_call_iter!(raw::git_note_next(&mut note_id, &mut 
annotated_id, + self.raw)); + Some(Ok((Binding::from_raw(¬e_id as *const _), + Binding::from_raw(&annotated_id as *const _)))) + } + } +} + +impl<'repo> Drop for Notes<'repo> { + fn drop(&mut self) { + unsafe { raw::git_note_iterator_free(self.raw); } + } +} + +#[cfg(test)] +mod tests { + #[test] + fn smoke() { + let (_td, repo) = ::test::repo_init(); + assert!(repo.notes(None).is_err()); + + let sig = repo.signature().unwrap(); + let head = repo.head().unwrap().target().unwrap(); + let note = repo.note(&sig, &sig, None, head, "foo", false).unwrap(); + assert_eq!(repo.notes(None).unwrap().count(), 1); + + let note_obj = repo.find_note(None, head).unwrap(); + assert_eq!(note_obj.id(), note); + assert_eq!(note_obj.message(), Some("foo")); + + let (a, b) = repo.notes(None).unwrap().next().unwrap().unwrap(); + assert_eq!(a, note); + assert_eq!(b, head); + + assert_eq!(repo.note_default_ref().unwrap(), "refs/notes/commits"); + + assert_eq!(sig.name(), note_obj.author().name()); + assert_eq!(sig.name(), note_obj.committer().name()); + assert!(sig.when() == note_obj.committer().when()); + } +} diff -Nru cargo-0.17.0/vendor/git2-0.6.4/src/object.rs cargo-0.19.0/vendor/git2-0.6.4/src/object.rs --- cargo-0.17.0/vendor/git2-0.6.4/src/object.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/git2-0.6.4/src/object.rs 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,174 @@ +use std::marker; +use std::mem; +use std::ptr; + +use {raw, Oid, ObjectType, Error, Buf, Commit, Tag, Blob, Tree, Repository}; +use {Describe, DescribeOptions}; +use util::Binding; + +/// A structure to represent a git [object][1] +/// +/// [1]: http://git-scm.com/book/en/Git-Internals-Git-Objects +pub struct Object<'repo> { + raw: *mut raw::git_object, + _marker: marker::PhantomData<&'repo Repository>, +} + +impl<'repo> Object<'repo> { + /// Get the id (SHA1) of a repository object + pub fn id(&self) -> Oid { + unsafe { + Binding::from_raw(raw::git_object_id(&*self.raw)) + } + } + + /// Get the 
object type of an object. + /// + /// If the type is unknown, then `None` is returned. + pub fn kind(&self) -> Option { + ObjectType::from_raw(unsafe { raw::git_object_type(&*self.raw) }) + } + + /// Recursively peel an object until an object of the specified type is met. + /// + /// If you pass `Any` as the target type, then the object will be + /// peeled until the type changes (e.g. a tag will be chased until the + /// referenced object is no longer a tag). + pub fn peel(&self, kind: ObjectType) -> Result, Error> { + let mut raw = 0 as *mut raw::git_object; + unsafe { + try_call!(raw::git_object_peel(&mut raw, &*self.raw(), kind)); + Ok(Binding::from_raw(raw)) + } + } + + /// Get a short abbreviated OID string for the object + /// + /// This starts at the "core.abbrev" length (default 7 characters) and + /// iteratively extends to a longer string if that length is ambiguous. The + /// result will be unambiguous (at least until new objects are added to the + /// repository). + pub fn short_id(&self) -> Result { + unsafe { + let buf = Buf::new(); + try_call!(raw::git_object_short_id(buf.raw(), &*self.raw())); + Ok(buf) + } + } + + /// Attempt to view this object as a commit. + /// + /// Returns `None` if the object is not actually a commit. + pub fn as_commit(&self) -> Option<&Commit<'repo>> { + self.cast(ObjectType::Commit) + } + + /// Attempt to consume this object and return a commit. + /// + /// Returns `Err(self)` if this object is not actually a commit. + pub fn into_commit(self) -> Result, Object<'repo>> { + self.cast_into(ObjectType::Commit) + } + + /// Attempt to view this object as a tag. + /// + /// Returns `None` if the object is not actually a tag. + pub fn as_tag(&self) -> Option<&Tag<'repo>> { + self.cast(ObjectType::Tag) + } + + /// Attempt to consume this object and return a tag. + /// + /// Returns `Err(self)` if this object is not actually a tag. 
+ pub fn into_tag(self) -> Result, Object<'repo>> { + self.cast_into(ObjectType::Tag) + } + + /// Attempt to view this object as a tree. + /// + /// Returns `None` if the object is not actually a tree. + pub fn as_tree(&self) -> Option<&Tree<'repo>> { + self.cast(ObjectType::Tree) + } + + /// Attempt to consume this object and return a tree. + /// + /// Returns `Err(self)` if this object is not actually a tree. + pub fn into_tree(self) -> Result, Object<'repo>> { + self.cast_into(ObjectType::Tree) + } + + /// Attempt to view this object as a blob. + /// + /// Returns `None` if the object is not actually a blob. + pub fn as_blob(&self) -> Option<&Blob<'repo>> { + self.cast(ObjectType::Blob) + } + + /// Attempt to consume this object and return a blob. + /// + /// Returns `Err(self)` if this object is not actually a blob. + pub fn into_blob(self) -> Result, Object<'repo>> { + self.cast_into(ObjectType::Blob) + } + + /// Describes a commit + /// + /// Performs a describe operation on this commitish object. 
+ pub fn describe(&self, opts: &DescribeOptions) + -> Result { + let mut ret = 0 as *mut _; + unsafe { + try_call!(raw::git_describe_commit(&mut ret, self.raw, opts.raw())); + Ok(Binding::from_raw(ret)) + } + } + + fn cast(&self, kind: ObjectType) -> Option<&T> { + assert_eq!(mem::size_of::(), mem::size_of::()); + if self.kind() == Some(kind) { + unsafe { Some(&*(self as *const _ as *const T)) } + } else { + None + } + } + + fn cast_into(self, kind: ObjectType) -> Result> { + assert_eq!(mem::size_of_val(&self), mem::size_of::()); + if self.kind() == Some(kind) { + Ok(unsafe { + let other = ptr::read(&self as *const _ as *const T); + mem::forget(self); + other + }) + } else { + Err(self) + } + } +} + +impl<'repo> Clone for Object<'repo> { + fn clone(&self) -> Object<'repo> { + let mut raw = 0 as *mut raw::git_object; + unsafe { + let rc = raw::git_object_dup(&mut raw, self.raw); + assert_eq!(rc, 0); + Binding::from_raw(raw) + } + } +} + +impl<'repo> Binding for Object<'repo> { + type Raw = *mut raw::git_object; + + unsafe fn from_raw(raw: *mut raw::git_object) -> Object<'repo> { + Object { raw: raw, _marker: marker::PhantomData, } + } + fn raw(&self) -> *mut raw::git_object { self.raw } +} + +impl<'repo> Drop for Object<'repo> { + fn drop(&mut self) { + unsafe { raw::git_object_free(self.raw) } + } +} diff -Nru cargo-0.17.0/vendor/git2-0.6.4/src/oid_array.rs cargo-0.19.0/vendor/git2-0.6.4/src/oid_array.rs --- cargo-0.17.0/vendor/git2-0.6.4/src/oid_array.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/git2-0.6.4/src/oid_array.rs 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,44 @@ +//! Bindings to libgit2's raw git_strarray type + +use std::ops::Deref; + +use oid::Oid; +use raw; +use util::Binding; +use std::slice; +use std::mem; + +/// An oid array structure used by libgit2 +/// +/// Some apis return arrays of oids which originate from libgit2. 
This +/// wrapper type behaves a little like `Vec<&Oid>` but does so without copying +/// the underlying Oids until necessary. +pub struct OidArray { + raw: raw::git_oidarray, +} + +impl Deref for OidArray { + type Target = [Oid]; + + fn deref(&self) -> &[Oid] { + unsafe { + debug_assert_eq!(mem::size_of::(), mem::size_of_val(&*self.raw.ids)); + + slice::from_raw_parts(self.raw.ids as *const Oid, self.raw.count as usize) + } + } +} + +impl Binding for OidArray { + type Raw = raw::git_oidarray; + unsafe fn from_raw(raw: raw::git_oidarray) -> OidArray { + OidArray { raw: raw } + } + fn raw(&self) -> raw::git_oidarray { self.raw } +} + +impl Drop for OidArray { + fn drop(&mut self) { + unsafe { raw::git_oidarray_free(&mut self.raw) } + } +} diff -Nru cargo-0.17.0/vendor/git2-0.6.4/src/oid.rs cargo-0.19.0/vendor/git2-0.6.4/src/oid.rs --- cargo-0.17.0/vendor/git2-0.6.4/src/oid.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/git2-0.6.4/src/oid.rs 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,144 @@ +use std::fmt; +use std::cmp::Ordering; +use std::hash::{Hasher, Hash}; +use std::str; +use libc; + +use {raw, Error}; +use util::Binding; + +/// Unique identity of any object (commit, tree, blob, tag). +#[derive(Copy)] +pub struct Oid { + raw: raw::git_oid, +} + +impl Oid { + /// Parse a hex-formatted object id into an Oid structure. + /// + /// If the string is not a valid 40-character hex string, an error is + /// returned. + pub fn from_str(s: &str) -> Result { + ::init(); + let mut raw = raw::git_oid { id: [0; raw::GIT_OID_RAWSZ] }; + unsafe { + try_call!(raw::git_oid_fromstrn(&mut raw, + s.as_bytes().as_ptr() + as *const libc::c_char, + s.len() as libc::size_t)); + } + Ok(Oid { raw: raw }) + } + + /// Parse a raw object id into an Oid structure. + /// + /// If the array given is not 20 bytes in length, an error is returned. 
+ pub fn from_bytes(bytes: &[u8]) -> Result { + ::init(); + let mut raw = raw::git_oid { id: [0; raw::GIT_OID_RAWSZ] }; + if bytes.len() != raw::GIT_OID_RAWSZ { + Err(Error::from_str("raw byte array must be 20 bytes")) + } else { + unsafe { raw::git_oid_fromraw(&mut raw, bytes.as_ptr()) } + Ok(Oid { raw: raw }) + } + } + + /// View this OID as a byte-slice 20 bytes in length. + pub fn as_bytes(&self) -> &[u8] { &self.raw.id } + + /// Test if this OID is all zeros. + pub fn is_zero(&self) -> bool { + unsafe { raw::git_oid_iszero(&self.raw) == 1 } + } +} + +impl Binding for Oid { + type Raw = *const raw::git_oid; + + unsafe fn from_raw(oid: *const raw::git_oid) -> Oid { + Oid { raw: *oid } + } + fn raw(&self) -> *const raw::git_oid { &self.raw as *const _ } +} + +impl fmt::Debug for Oid { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fmt::Display::fmt(self, f) + } +} + +impl fmt::Display for Oid { + /// Hex-encode this Oid into a formatter. + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let mut dst = [0u8; raw::GIT_OID_HEXSZ + 1]; + unsafe { + raw::git_oid_tostr(dst.as_mut_ptr() as *mut libc::c_char, + dst.len() as libc::size_t, &self.raw); + } + let s = &dst[..dst.iter().position(|&a| a == 0).unwrap()]; + str::from_utf8(s).unwrap().fmt(f) + } +} + +impl str::FromStr for Oid { + type Err = Error; + + /// Parse a hex-formatted object id into an Oid structure. + /// + /// If the string is not a valid 40-character hex string, an error is + /// returned. 
+ fn from_str(s: &str) -> Result { + Oid::from_str(s) + } +} + +impl PartialEq for Oid { + fn eq(&self, other: &Oid) -> bool { + unsafe { raw::git_oid_equal(&self.raw, &other.raw) != 0 } + } +} +impl Eq for Oid {} + +impl PartialOrd for Oid { + fn partial_cmp(&self, other: &Oid) -> Option { + Some(self.cmp(other)) + } +} + +impl Ord for Oid { + fn cmp(&self, other: &Oid) -> Ordering { + match unsafe { raw::git_oid_cmp(&self.raw, &other.raw) } { + 0 => Ordering::Equal, + n if n < 0 => Ordering::Less, + _ => Ordering::Greater, + } + } +} + +impl Clone for Oid { + fn clone(&self) -> Oid { *self } +} + +impl Hash for Oid { + fn hash(&self, into: &mut H) { + self.raw.id.hash(into) + } +} + +impl AsRef<[u8]> for Oid { + fn as_ref(&self) -> &[u8] { self.as_bytes() } +} + +#[cfg(test)] +mod tests { + use super::Oid; + + #[test] + fn conversions() { + assert!(Oid::from_str("foo").is_err()); + assert!(Oid::from_str("decbf2be529ab6557d5429922251e5ee36519817").is_ok()); + assert!(Oid::from_bytes(b"foo").is_err()); + assert!(Oid::from_bytes(b"00000000000000000000").is_ok()); + } +} diff -Nru cargo-0.17.0/vendor/git2-0.6.4/src/packbuilder.rs cargo-0.19.0/vendor/git2-0.6.4/src/packbuilder.rs --- cargo-0.17.0/vendor/git2-0.6.4/src/packbuilder.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/git2-0.6.4/src/packbuilder.rs 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,386 @@ +use std::marker; +use std::ptr; +use std::slice; +use libc::{c_int, c_uint, c_void, size_t}; + +use {raw, panic, Repository, Error, Oid, Revwalk, Buf}; +use util::Binding; + +/// Stages that are reported by the PackBuilder progress callback. 
+pub enum PackBuilderStage { + /// Adding objects to the pack + AddingObjects, + /// Deltafication of the pack + Deltafication, +} + +pub type ProgressCb<'a> = FnMut(PackBuilderStage, u32, u32) -> bool + 'a; +pub type ForEachCb<'a> = FnMut(&[u8]) -> bool + 'a; + +/// A builder for creating a packfile +pub struct PackBuilder<'repo> { + raw: *mut raw::git_packbuilder, + progress: Option>>>, + _marker: marker::PhantomData<&'repo Repository>, +} + +impl<'repo> PackBuilder<'repo> { + /// Insert a single object. For an optimal pack it's mandatory to insert + /// objects in recency order, commits followed by trees and blobs. + pub fn insert_object(&mut self, id: Oid, name: Option<&str>) + -> Result<(), Error> { + let name = try!(::opt_cstr(name)); + unsafe { + try_call!(raw::git_packbuilder_insert(self.raw, id.raw(), name)); + } + Ok(()) + } + + /// Insert a root tree object. This will add the tree as well as all + /// referenced trees and blobs. + pub fn insert_tree(&mut self, id: Oid) -> Result<(), Error> { + unsafe { + try_call!(raw::git_packbuilder_insert_tree(self.raw, id.raw())); + } + Ok(()) + } + + /// Insert a commit object. This will add a commit as well as the completed + /// referenced tree. + pub fn insert_commit(&mut self, id: Oid) -> Result<(), Error> { + unsafe { + try_call!(raw::git_packbuilder_insert_commit(self.raw, id.raw())); + } + Ok(()) + } + + /// Insert objects as given by the walk. Those commits and all objects they + /// reference will be inserted into the packbuilder. + pub fn insert_walk(&mut self, walk: &mut Revwalk) -> Result<(), Error> { + unsafe { + try_call!(raw::git_packbuilder_insert_walk(self.raw, walk.raw())); + } + Ok(()) + } + + /// Recursively insert an object and its referenced objects. Insert the + /// object as well as any object it references. 
+ pub fn insert_recursive(&mut self, id: Oid, name: Option<&str>) + -> Result<(), Error> { + let name = try!(::opt_cstr(name)); + unsafe { + try_call!(raw::git_packbuilder_insert_recur(self.raw, + id.raw(), + name)); + } + Ok(()) + } + + /// Write the contents of the packfile to an in-memory buffer. The contents + /// of the buffer will become a valid packfile, even though there will be + /// no attached index. + pub fn write_buf(&mut self, buf: &mut Buf) -> Result<(), Error> { + unsafe { + try_call!(raw::git_packbuilder_write_buf(buf.raw(), self.raw)); + } + Ok(()) + } + + /// Create the new pack and pass each object to the callback. + pub fn foreach(&mut self, mut cb: F) -> Result<(), Error> + where F: FnMut(&[u8]) -> bool + { + let mut cb = &mut cb as &mut ForEachCb; + let ptr = &mut cb as *mut _; + unsafe { + try_call!(raw::git_packbuilder_foreach(self.raw, + foreach_c, + ptr as *mut _)); + } + Ok(()) + } + + /// `progress` will be called with progress information during pack + /// building. Be aware that this is called inline with pack building + /// operations, so performance may be affected. + /// + /// There can only be one progress callback attached, this will replace any + /// existing one. See `unset_progress_callback` to remove the current + /// progress callback without attaching a new one. + pub fn set_progress_callback(&mut self, progress: F) -> Result<(), Error> + where F: FnMut(PackBuilderStage, u32, u32) -> bool + 'repo + { + let mut progress = Box::new(Box::new(progress) as Box); + let ptr = &mut *progress as *mut _; + let progress_c = Some(progress_c as raw::git_packbuilder_progress); + unsafe { + try_call!(raw::git_packbuilder_set_callbacks(self.raw, + progress_c, + ptr as *mut _)); + } + self.progress = Some(progress); + Ok(()) + } + + /// Remove the current progress callback. See `set_progress_callback` to + /// set the progress callback. 
+ pub fn unset_progress_callback(&mut self) -> Result<(), Error> { + unsafe { + try_call!(raw::git_packbuilder_set_callbacks(self.raw, + None, + ptr::null_mut())); + self.progress = None; + } + Ok(()) + } + + /// Get the total number of objects the packbuilder will write out. + pub fn object_count(&self) -> usize { + unsafe { raw::git_packbuilder_object_count(self.raw) } + } + + /// Get the number of objects the packbuilder has already written out. + pub fn written(&self) -> usize { + unsafe { raw::git_packbuilder_written(self.raw) } + } + + /// Get the packfile's hash. A packfile's name is derived from the sorted + /// hashing of all object names. This is only correct after the packfile + /// has been written. + pub fn hash(&self) -> Option { + if self.object_count() == 0 { + unsafe { + Some(Binding::from_raw(raw::git_packbuilder_hash(self.raw))) + } + } else { + None + } + } +} + +impl<'repo> Binding for PackBuilder<'repo> { + type Raw = *mut raw::git_packbuilder; + unsafe fn from_raw(ptr: *mut raw::git_packbuilder) -> PackBuilder<'repo> { + PackBuilder { + raw: ptr, + progress: None, + _marker: marker::PhantomData, + } + } + fn raw(&self) -> *mut raw::git_packbuilder { + self.raw + } +} + +impl<'repo> Drop for PackBuilder<'repo> { + fn drop(&mut self) { + unsafe { + raw::git_packbuilder_set_callbacks(self.raw, None, ptr::null_mut()); + raw::git_packbuilder_free(self.raw); + } + } +} + +impl Binding for PackBuilderStage { + type Raw = raw::git_packbuilder_stage_t; + unsafe fn from_raw(raw: raw::git_packbuilder_stage_t) -> PackBuilderStage { + match raw { + raw::GIT_PACKBUILDER_ADDING_OBJECTS => PackBuilderStage::AddingObjects, + raw::GIT_PACKBUILDER_DELTAFICATION => PackBuilderStage::Deltafication, + _ => panic!("Unknown git diff binary kind"), + } + } + fn raw(&self) -> raw::git_packbuilder_stage_t { + match *self { + PackBuilderStage::AddingObjects => raw::GIT_PACKBUILDER_ADDING_OBJECTS, + PackBuilderStage::Deltafication => raw::GIT_PACKBUILDER_DELTAFICATION, + 
} + } +} + +extern fn foreach_c(buf: *const c_void, + size: size_t, + data: *mut c_void) + -> c_int { + unsafe { + let buf = slice::from_raw_parts(buf as *const u8, size as usize); + + let r = panic::wrap(|| { + let data = data as *mut &mut ForEachCb; + (*data)(buf) + }); + if r == Some(true) { + 0 + } else { + -1 + } + } +} + +extern fn progress_c(stage: raw::git_packbuilder_stage_t, + current: c_uint, + total: c_uint, + data: *mut c_void) + -> c_int { + unsafe { + let stage = Binding::from_raw(stage); + + let r = panic::wrap(|| { + let data = data as *mut Box; + (*data)(stage, current, total) + }); + if r == Some(true) { + 0 + } else { + -1 + } + } +} + +#[cfg(test)] +mod tests { + use std::fs::File; + use std::path::Path; + use {Buf, Repository, Oid}; + + fn commit(repo: &Repository) -> (Oid, Oid) { + let mut index = t!(repo.index()); + let root = repo.path().parent().unwrap(); + t!(File::create(&root.join("foo"))); + t!(index.add_path(Path::new("foo"))); + + let tree_id = t!(index.write_tree()); + let tree = t!(repo.find_tree(tree_id)); + let sig = t!(repo.signature()); + let head_id = t!(repo.refname_to_id("HEAD")); + let parent = t!(repo.find_commit(head_id)); + let commit = t!(repo.commit(Some("HEAD"), + &sig, + &sig, + "commit", + &tree, + &[&parent])); + (commit, tree_id) + } + + fn pack_header(len: u8) -> Vec { + [].into_iter() + .chain(b"PACK") // signature + .chain(&[0, 0, 0, 2]) // version number + .chain(&[0, 0, 0, len]) // number of objects + .cloned().collect::>() + } + + fn empty_pack_header() -> Vec { + pack_header(0).iter() + .chain(&[0x02, 0x9d, 0x08, 0x82, 0x3b, // ^ + 0xd8, 0xa8, 0xea, 0xb5, 0x10, // | SHA-1 of the zero + 0xad, 0x6a, 0xc7, 0x5c, 0x82, // | object pack header + 0x3c, 0xfd, 0x3e, 0xd3, 0x1e]) // v + .cloned().collect::>() + } + + #[test] + fn smoke() { + let (_td, repo) = ::test::repo_init(); + let _builder = t!(repo.packbuilder()); + } + + #[test] + fn smoke_write_buf() { + let (_td, repo) = ::test::repo_init(); + let mut 
builder = t!(repo.packbuilder()); + let mut buf = Buf::new(); + t!(builder.write_buf(&mut buf)); + assert!(builder.hash().unwrap().is_zero()); + assert_eq!(&*buf, &*empty_pack_header()); + } + + #[test] + fn smoke_foreach() { + let (_td, repo) = ::test::repo_init(); + let mut builder = t!(repo.packbuilder()); + let mut buf = Vec::::new(); + t!(builder.foreach(|bytes| { + buf.extend(bytes); + true + })); + assert_eq!(&*buf, &*empty_pack_header()); + } + + #[test] + fn insert_write_buf() { + let (_td, repo) = ::test::repo_init(); + let mut builder = t!(repo.packbuilder()); + let mut buf = Buf::new(); + let (commit, _tree) = commit(&repo); + t!(builder.insert_object(commit, None)); + assert_eq!(builder.object_count(), 1); + t!(builder.write_buf(&mut buf)); + // Just check that the correct number of objects are written + assert_eq!(&buf[0..12], &*pack_header(1)); + } + + #[test] + fn insert_tree_write_buf() { + let (_td, repo) = ::test::repo_init(); + let mut builder = t!(repo.packbuilder()); + let mut buf = Buf::new(); + let (_commit, tree) = commit(&repo); + // will insert the tree itself and the blob, 2 objects + t!(builder.insert_tree(tree)); + assert_eq!(builder.object_count(), 2); + t!(builder.write_buf(&mut buf)); + // Just check that the correct number of objects are written + assert_eq!(&buf[0..12], &*pack_header(2)); + } + + #[test] + fn insert_commit_write_buf() { + let (_td, repo) = ::test::repo_init(); + let mut builder = t!(repo.packbuilder()); + let mut buf = Buf::new(); + let (commit, _tree) = commit(&repo); + // will insert the commit, its tree and the blob, 3 objects + t!(builder.insert_commit(commit)); + assert_eq!(builder.object_count(), 3); + t!(builder.write_buf(&mut buf)); + // Just check that the correct number of objects are written + assert_eq!(&buf[0..12], &*pack_header(3)); + } + + #[test] + fn progress_callback() { + let mut progress_called = false; + { + let (_td, repo) = ::test::repo_init(); + let mut builder = t!(repo.packbuilder()); + 
let (commit, _tree) = commit(&repo); + t!(builder.set_progress_callback(|_, _, _| { + progress_called = true; + true + })); + t!(builder.insert_commit(commit)); + t!(builder.write_buf(&mut Buf::new())); + } + assert_eq!(progress_called, true); + } + + #[test] + fn clear_progress_callback() { + let mut progress_called = false; + { + let (_td, repo) = ::test::repo_init(); + let mut builder = t!(repo.packbuilder()); + let (commit, _tree) = commit(&repo); + t!(builder.set_progress_callback(|_, _, _| { + progress_called = true; + true + })); + t!(builder.unset_progress_callback()); + t!(builder.insert_commit(commit)); + t!(builder.write_buf(&mut Buf::new())); + } + assert_eq!(progress_called, false); + } +} diff -Nru cargo-0.17.0/vendor/git2-0.6.4/src/panic.rs cargo-0.19.0/vendor/git2-0.6.4/src/panic.rs --- cargo-0.17.0/vendor/git2-0.6.4/src/panic.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/git2-0.6.4/src/panic.rs 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,55 @@ +use std::any::Any; +use std::cell::RefCell; + +thread_local!(static LAST_ERROR: RefCell>> = { + RefCell::new(None) +}); + +#[cfg(feature = "unstable")] +pub fn wrap T + ::std::panic::UnwindSafe>(f: F) -> Option { + use std::panic; + if LAST_ERROR.with(|slot| slot.borrow().is_some()) { + return None + } + match panic::catch_unwind(f) { + Ok(ret) => Some(ret), + Err(e) => { + LAST_ERROR.with(move |slot| { + *slot.borrow_mut() = Some(e); + }); + None + } + } +} + +#[cfg(not(feature = "unstable"))] +pub fn wrap T>(f: F) -> Option { + struct Bomb { + enabled: bool, + } + impl Drop for Bomb { + fn drop(&mut self) { + if !self.enabled { + return + } + panic!("callback has panicked, and continuing to unwind into C \ + is not safe, so aborting the process"); + + } + } + let mut bomb = Bomb { enabled: true }; + let ret = Some(f()); + bomb.enabled = false; + return ret; +} + +pub fn check() { + let err = LAST_ERROR.with(|slot| slot.borrow_mut().take()); + if let Some(err) = err { + panic!(err) + } 
+} + +pub fn panicked() -> bool { + LAST_ERROR.with(|slot| slot.borrow().is_some()) +} diff -Nru cargo-0.17.0/vendor/git2-0.6.4/src/patch.rs cargo-0.19.0/vendor/git2-0.6.4/src/patch.rs --- cargo-0.17.0/vendor/git2-0.6.4/src/patch.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/git2-0.6.4/src/patch.rs 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,201 @@ +use std::path::Path; +use libc::{c_char, c_int, c_void}; + +use {raw, Blob, Buf, Diff, DiffDelta, DiffHunk, DiffLine, DiffOptions, Error}; +use diff::{LineCb, print_cb}; +use util::{Binding, into_opt_c_string}; + +/// A structure representing the text changes in a single diff delta. +/// +/// This is an opaque structure. +pub struct Patch { + raw: *mut raw::git_patch, +} + +unsafe impl Send for Patch {} + +impl Binding for Patch { + type Raw = *mut raw::git_patch; + unsafe fn from_raw(raw: Self::Raw) -> Patch { + Patch { raw: raw } + } + fn raw(&self) -> Self::Raw { self.raw } +} + +impl Drop for Patch { + fn drop(&mut self) { + unsafe { raw::git_patch_free(self.raw) } + } +} + +impl Patch { + /// Return a Patch for one file in a Diff. + /// + /// Returns Ok(None) for an unchanged or binary file. + pub fn from_diff(diff: &Diff, idx: usize) -> Result, Error> { + let mut ret = 0 as *mut raw::git_patch; + unsafe { + try_call!(raw::git_patch_from_diff(&mut ret, diff.raw(), idx)); + Ok(Binding::from_raw_opt(ret)) + } + } + + /// Generate a Patch by diffing two blobs. + pub fn from_blobs(old_blob: &Blob, + old_path: Option<&Path>, + new_blob: &Blob, + new_path: Option<&Path>, + opts: Option<&mut DiffOptions>) + -> Result + { + let mut ret = 0 as *mut raw::git_patch; + let old_path = try!(into_opt_c_string(old_path)); + let new_path = try!(into_opt_c_string(new_path)); + unsafe { + try_call!(raw::git_patch_from_blobs(&mut ret, + old_blob.raw(), + old_path, + new_blob.raw(), + new_path, + opts.map(|s| s.raw()))); + Ok(Binding::from_raw(ret)) + } + } + + /// Generate a Patch by diffing a blob and a buffer. 
+ pub fn from_blob_and_buffer(old_blob: &Blob, + old_path: Option<&Path>, + new_buffer: &[u8], + new_path: Option<&Path>, + opts: Option<&mut DiffOptions>) + -> Result + { + let mut ret = 0 as *mut raw::git_patch; + let old_path = try!(into_opt_c_string(old_path)); + let new_path = try!(into_opt_c_string(new_path)); + unsafe { + try_call!(raw::git_patch_from_blob_and_buffer(&mut ret, + old_blob.raw(), + old_path, + new_buffer.as_ptr() as *const c_char, + new_buffer.len(), + new_path, + opts.map(|s| s.raw()))); + Ok(Binding::from_raw(ret)) + } + } + + /// Generate a Patch by diffing two buffers. + pub fn from_buffers(old_buffer: &[u8], + old_path: Option<&Path>, + new_buffer: &[u8], + new_path: Option<&Path>, + opts: Option<&mut DiffOptions>) + -> Result + { + let mut ret = 0 as *mut raw::git_patch; + let old_path = try!(into_opt_c_string(old_path)); + let new_path = try!(into_opt_c_string(new_path)); + unsafe { + try_call!(raw::git_patch_from_buffers(&mut ret, + old_buffer.as_ptr() as *const c_void, + old_buffer.len(), + old_path, + new_buffer.as_ptr() as *const c_char, + new_buffer.len(), + new_path, + opts.map(|s| s.raw()))); + Ok(Binding::from_raw(ret)) + } + } + + /// Get the DiffDelta associated with the Patch. + pub fn delta(&self) -> DiffDelta { + unsafe { + Binding::from_raw(raw::git_patch_get_delta(self.raw) as *mut _) + } + } + + /// Get the number of hunks in the Patch. + pub fn num_hunks(&self) -> usize { + unsafe { + raw::git_patch_num_hunks(self.raw) + } + } + + /// Get the number of lines of context, additions, and deletions in the Patch. + pub fn line_stats(&self) -> Result<(usize, usize, usize), Error> { + let mut context = 0; + let mut additions = 0; + let mut deletions = 0; + unsafe { + try_call!(raw::git_patch_line_stats(&mut context, + &mut additions, + &mut deletions, + self.raw)); + } + Ok((context, additions, deletions)) + } + + /// Get a DiffHunk and its total line count from the Patch. 
+ pub fn hunk(&mut self, hunk_idx: usize) -> Result<(DiffHunk, usize), Error> { + let mut ret = 0 as *const raw::git_diff_hunk; + let mut lines = 0; + unsafe { + try_call!(raw::git_patch_get_hunk(&mut ret, &mut lines, self.raw, hunk_idx)); + Ok((Binding::from_raw(ret), lines)) + } + } + + /// Get the number of lines in a hunk. + pub fn num_lines_in_hunk(&self, hunk_idx: usize) -> Result { + unsafe { + Ok(try_call!(raw::git_patch_num_lines_in_hunk(self.raw, hunk_idx)) as usize) + } + } + + /// Get a DiffLine from a hunk of the Patch. + pub fn line_in_hunk(&mut self, + hunk_idx: usize, + line_of_hunk: usize) -> Result { + let mut ret = 0 as *const raw::git_diff_line; + unsafe { + try_call!(raw::git_patch_get_line_in_hunk(&mut ret, + self.raw, + hunk_idx, + line_of_hunk)); + Ok(Binding::from_raw(ret)) + } + } + + /// Get the size of a Patch's diff data in bytes. + pub fn size(&self, + include_context: bool, + include_hunk_headers: bool, + include_file_headers: bool) -> usize { + unsafe { + raw::git_patch_size(self.raw, + include_context as c_int, + include_hunk_headers as c_int, + include_file_headers as c_int) + } + } + + /// Print the Patch to text via a callback. + pub fn print(&mut self, mut line_cb: &mut LineCb) -> Result<(), Error> { + let ptr = &mut line_cb as *mut _ as *mut c_void; + unsafe { + try_call!(raw::git_patch_print(self.raw, print_cb, ptr)); + return Ok(()) + } + } + + /// Get the Patch text as a Buf. 
+ pub fn to_buf(&mut self) -> Result { + let buf = Buf::new(); + unsafe { + try_call!(raw::git_patch_to_buf(buf.raw(), self.raw)); + } + Ok(buf) + } +} diff -Nru cargo-0.17.0/vendor/git2-0.6.4/src/pathspec.rs cargo-0.19.0/vendor/git2-0.6.4/src/pathspec.rs --- cargo-0.17.0/vendor/git2-0.6.4/src/pathspec.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/git2-0.6.4/src/pathspec.rs 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,300 @@ +use std::iter::IntoIterator; +use std::marker; +use std::ops::Range; +use std::path::Path; +use libc::size_t; + +use {raw, Error, Diff, Tree, PathspecFlags, Index, Repository, DiffDelta, IntoCString}; +use util::Binding; + +/// Structure representing a compiled pathspec used for matching against various +/// structures. +pub struct Pathspec { + raw: *mut raw::git_pathspec, +} + +/// List of filenames matching a pathspec. +pub struct PathspecMatchList<'ps> { + raw: *mut raw::git_pathspec_match_list, + _marker: marker::PhantomData<&'ps Pathspec>, +} + +/// Iterator over the matched paths in a pathspec. +pub struct PathspecEntries<'list> { + range: Range, + list: &'list PathspecMatchList<'list>, +} + +/// Iterator over the matching diff deltas. +pub struct PathspecDiffEntries<'list> { + range: Range, + list: &'list PathspecMatchList<'list>, +} + +/// Iterator over the failed list of pathspec items that did not match. +pub struct PathspecFailedEntries<'list> { + range: Range, + list: &'list PathspecMatchList<'list>, +} + +impl Pathspec { + /// Creates a new pathspec from a list of specs to match against. + pub fn new(specs: I) -> Result + where T: IntoCString, I: IntoIterator { + let (_a, _b, arr) = try!(::util::iter2cstrs(specs)); + unsafe { + let mut ret = 0 as *mut raw::git_pathspec; + try_call!(raw::git_pathspec_new(&mut ret, &arr)); + Ok(Binding::from_raw(ret)) + } + } + + /// Match a pathspec against files in a diff. 
+ /// + /// The list returned contains the list of all matched filenames (unless you + /// pass `PATHSPEC_FAILURES_ONLY` in the flags) and may also contain the + /// list of pathspecs with no match if the `PATHSPEC_FIND_FAILURES` flag is + /// specified. + pub fn match_diff(&self, diff: &Diff, flags: PathspecFlags) + -> Result { + let mut ret = 0 as *mut raw::git_pathspec_match_list; + unsafe { + try_call!(raw::git_pathspec_match_diff(&mut ret, diff.raw(), + flags.bits(), self.raw)); + Ok(Binding::from_raw(ret)) + } + } + + /// Match a pathspec against files in a tree. + /// + /// The list returned contains the list of all matched filenames (unless you + /// pass `PATHSPEC_FAILURES_ONLY` in the flags) and may also contain the + /// list of pathspecs with no match if the `PATHSPEC_FIND_FAILURES` flag is + /// specified. + pub fn match_tree(&self, tree: &Tree, flags: PathspecFlags) + -> Result { + let mut ret = 0 as *mut raw::git_pathspec_match_list; + unsafe { + try_call!(raw::git_pathspec_match_tree(&mut ret, tree.raw(), + flags.bits(), self.raw)); + Ok(Binding::from_raw(ret)) + } + } + + /// This matches the pathspec against the files in the repository index. + /// + /// The list returned contains the list of all matched filenames (unless you + /// pass `PATHSPEC_FAILURES_ONLY` in the flags) and may also contain the + /// list of pathspecs with no match if the `PATHSPEC_FIND_FAILURES` flag is + /// specified. + pub fn match_index(&self, index: &Index, flags: PathspecFlags) + -> Result { + let mut ret = 0 as *mut raw::git_pathspec_match_list; + unsafe { + try_call!(raw::git_pathspec_match_index(&mut ret, index.raw(), + flags.bits(), self.raw)); + Ok(Binding::from_raw(ret)) + } + } + + /// Match a pathspec against the working directory of a repository. + /// + /// This matches the pathspec against the current files in the working + /// directory of the repository. It is an error to invoke this on a bare + /// repo. This handles git ignores (i.e. 
ignored files will not be + /// considered to match the pathspec unless the file is tracked in the + /// index). + /// + /// The list returned contains the list of all matched filenames (unless you + /// pass `PATHSPEC_FAILURES_ONLY` in the flags) and may also contain the + /// list of pathspecs with no match if the `PATHSPEC_FIND_FAILURES` flag is + /// specified. + pub fn match_workdir(&self, repo: &Repository, flags: PathspecFlags) + -> Result { + let mut ret = 0 as *mut raw::git_pathspec_match_list; + unsafe { + try_call!(raw::git_pathspec_match_workdir(&mut ret, repo.raw(), + flags.bits(), self.raw)); + Ok(Binding::from_raw(ret)) + } + } + + /// Try to match a path against a pathspec + /// + /// Unlike most of the other pathspec matching functions, this will not fall + /// back on the native case-sensitivity for your platform. You must + /// explicitly pass flags to control case sensitivity or else this will fall + /// back on being case sensitive. + pub fn matches_path(&self, path: &Path, flags: PathspecFlags) -> bool { + let path = path.into_c_string().unwrap(); + unsafe { + raw::git_pathspec_matches_path(&*self.raw, flags.bits(), + path.as_ptr()) == 1 + } + } +} + +impl Binding for Pathspec { + type Raw = *mut raw::git_pathspec; + + unsafe fn from_raw(raw: *mut raw::git_pathspec) -> Pathspec { + Pathspec { raw: raw } + } + fn raw(&self) -> *mut raw::git_pathspec { self.raw } +} + +impl Drop for Pathspec { + fn drop(&mut self) { + unsafe { raw::git_pathspec_free(self.raw) } + } +} + +impl<'ps> PathspecMatchList<'ps> { + fn entrycount(&self) -> usize { + unsafe { raw::git_pathspec_match_list_entrycount(&*self.raw) as usize } + } + + fn failed_entrycount(&self) -> usize { + unsafe { raw::git_pathspec_match_list_failed_entrycount(&*self.raw) as usize } + } + + /// Returns an iterator over the matching filenames in this list. 
+ pub fn entries(&self) -> PathspecEntries { + let n = self.entrycount(); + let n = if n > 0 && self.entry(0).is_none() {0} else {n}; + PathspecEntries { range: 0..n, list: self } + } + + /// Get a matching filename by position. + /// + /// If this list was generated from a diff, then the return value will + /// always be `None. + pub fn entry(&self, i: usize) -> Option<&[u8]> { + unsafe { + let ptr = raw::git_pathspec_match_list_entry(&*self.raw, i as size_t); + ::opt_bytes(self, ptr) + } + } + + /// Returns an iterator over the matching diff entries in this list. + pub fn diff_entries(&self) -> PathspecDiffEntries { + let n = self.entrycount(); + let n = if n > 0 && self.diff_entry(0).is_none() {0} else {n}; + PathspecDiffEntries { range: 0..n, list: self } + } + + /// Get a matching diff delta by position. + /// + /// If the list was not generated from a diff, then the return value will + /// always be `None`. + pub fn diff_entry(&self, i: usize) -> Option { + unsafe { + let ptr = raw::git_pathspec_match_list_diff_entry(&*self.raw, + i as size_t); + Binding::from_raw_opt(ptr as *mut _) + } + } + + /// Returns an iterator over the non-matching entries in this list. + pub fn failed_entries(&self) -> PathspecFailedEntries { + let n = self.failed_entrycount(); + let n = if n > 0 && self.failed_entry(0).is_none() {0} else {n}; + PathspecFailedEntries { range: 0..n, list: self } + } + + /// Get an original pathspec string that had no matches. 
+ pub fn failed_entry(&self, i: usize) -> Option<&[u8]> { + unsafe { + let ptr = raw::git_pathspec_match_list_failed_entry(&*self.raw, + i as size_t); + ::opt_bytes(self, ptr) + } + } +} + +impl<'ps> Binding for PathspecMatchList<'ps> { + type Raw = *mut raw::git_pathspec_match_list; + + unsafe fn from_raw(raw: *mut raw::git_pathspec_match_list) + -> PathspecMatchList<'ps> { + PathspecMatchList { raw: raw, _marker: marker::PhantomData } + } + fn raw(&self) -> *mut raw::git_pathspec_match_list { self.raw } +} + +impl<'ps> Drop for PathspecMatchList<'ps> { + fn drop(&mut self) { + unsafe { raw::git_pathspec_match_list_free(self.raw) } + } +} + +impl<'list> Iterator for PathspecEntries<'list> { + type Item = &'list [u8]; + fn next(&mut self) -> Option<&'list [u8]> { + self.range.next().and_then(|i| self.list.entry(i)) + } + fn size_hint(&self) -> (usize, Option) { self.range.size_hint() } +} +impl<'list> DoubleEndedIterator for PathspecEntries<'list> { + fn next_back(&mut self) -> Option<&'list [u8]> { + self.range.next_back().and_then(|i| self.list.entry(i)) + } +} +impl<'list> ExactSizeIterator for PathspecEntries<'list> {} + +impl<'list> Iterator for PathspecDiffEntries<'list> { + type Item = DiffDelta<'list>; + fn next(&mut self) -> Option> { + self.range.next().and_then(|i| self.list.diff_entry(i)) + } + fn size_hint(&self) -> (usize, Option) { self.range.size_hint() } +} +impl<'list> DoubleEndedIterator for PathspecDiffEntries<'list> { + fn next_back(&mut self) -> Option> { + self.range.next_back().and_then(|i| self.list.diff_entry(i)) + } +} +impl<'list> ExactSizeIterator for PathspecDiffEntries<'list> {} + +impl<'list> Iterator for PathspecFailedEntries<'list> { + type Item = &'list [u8]; + fn next(&mut self) -> Option<&'list [u8]> { + self.range.next().and_then(|i| self.list.failed_entry(i)) + } + fn size_hint(&self) -> (usize, Option) { self.range.size_hint() } +} +impl<'list> DoubleEndedIterator for PathspecFailedEntries<'list> { + fn next_back(&mut self) 
-> Option<&'list [u8]> { + self.range.next_back().and_then(|i| self.list.failed_entry(i)) + } +} +impl<'list> ExactSizeIterator for PathspecFailedEntries<'list> {} + +#[cfg(test)] +mod tests { + use PATHSPEC_DEFAULT; + use super::Pathspec; + use std::fs::File; + use std::path::Path; + + #[test] + fn smoke() { + let ps = Pathspec::new(["a"].iter()).unwrap(); + assert!(ps.matches_path(Path::new("a"), PATHSPEC_DEFAULT)); + assert!(ps.matches_path(Path::new("a/b"), PATHSPEC_DEFAULT)); + assert!(!ps.matches_path(Path::new("b"), PATHSPEC_DEFAULT)); + assert!(!ps.matches_path(Path::new("ab/c"), PATHSPEC_DEFAULT)); + + let (td, repo) = ::test::repo_init(); + let list = ps.match_workdir(&repo, PATHSPEC_DEFAULT).unwrap(); + assert_eq!(list.entries().len(), 0); + assert_eq!(list.diff_entries().len(), 0); + assert_eq!(list.failed_entries().len(), 0); + + File::create(&td.path().join("a")).unwrap(); + + let list = ps.match_workdir(&repo, ::PATHSPEC_FIND_FAILURES).unwrap(); + assert_eq!(list.entries().len(), 1); + assert_eq!(list.entries().next(), Some("a".as_bytes())); + } +} diff -Nru cargo-0.17.0/vendor/git2-0.6.4/src/proxy_options.rs cargo-0.19.0/vendor/git2-0.6.4/src/proxy_options.rs --- cargo-0.17.0/vendor/git2-0.6.4/src/proxy_options.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/git2-0.6.4/src/proxy_options.rs 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,58 @@ +use std::ffi::CString; +use std::marker; + +use raw; +use util::Binding; + +/// Options which can be specified to various fetch operations. +pub struct ProxyOptions<'a> { + url: Option, + proxy_kind: raw::git_proxy_t, + _marker: marker::PhantomData<&'a i32>, +} + +impl<'a> ProxyOptions<'a> { + /// Creates a new set of proxy options ready to be configured. + pub fn new() -> ProxyOptions<'a> { + ProxyOptions { + url: None, + proxy_kind: raw::GIT_PROXY_NONE, + _marker: marker::PhantomData, + } + } + + /// Try to auto-detect the proxy from the git configuration. 
+ /// + /// Note that this will override `url` specified before. + pub fn auto(&mut self) -> &mut Self { + self.proxy_kind = raw::GIT_PROXY_AUTO; + self + } + + /// Specify the exact URL of the proxy to use. + /// + /// Note that this will override `auto` specified before. + pub fn url(&mut self, url: &str) -> &mut Self { + self.proxy_kind = raw::GIT_PROXY_SPECIFIED; + self.url = Some(CString::new(url).unwrap()); + self + } +} + +impl<'a> Binding for ProxyOptions<'a> { + type Raw = raw::git_proxy_options; + unsafe fn from_raw(_raw: raw::git_proxy_options) -> ProxyOptions<'a> { + panic!("can't create proxy from raw options") + } + + fn raw(&self) -> raw::git_proxy_options { + raw::git_proxy_options { + version: raw::GIT_PROXY_OPTIONS_VERSION, + kind: self.proxy_kind, + url: self.url.as_ref().map(|s| s.as_ptr()).unwrap_or(0 as *const _), + credentials: None, + certificate_check: None, + payload: 0 as *mut _, + } + } +} diff -Nru cargo-0.17.0/vendor/git2-0.6.4/src/reference.rs cargo-0.19.0/vendor/git2-0.6.4/src/reference.rs --- cargo-0.17.0/vendor/git2-0.6.4/src/reference.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/git2-0.6.4/src/reference.rs 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,354 @@ +use std::cmp::Ordering; +use std::ffi::CString; +use std::marker; +use std::mem; +use std::str; +use libc; + +use {raw, Error, Oid, Repository, Object, ObjectType}; +use util::Binding; + +struct Refdb<'repo>(&'repo Repository); + +/// A structure to represent a git [reference][1]. +/// +/// [1]: http://git-scm.com/book/en/Git-Internals-Git-References +pub struct Reference<'repo> { + raw: *mut raw::git_reference, + _marker: marker::PhantomData>, +} + +/// An iterator over the references in a repository. +pub struct References<'repo> { + raw: *mut raw::git_reference_iterator, + _marker: marker::PhantomData>, +} + +/// An iterator over the names of references in a repository. 
+pub struct ReferenceNames<'repo> { + inner: References<'repo>, +} + +impl<'repo> Reference<'repo> { + /// Ensure the reference name is well-formed. + pub fn is_valid_name(refname: &str) -> bool { + ::init(); + let refname = CString::new(refname).unwrap(); + unsafe { raw::git_reference_is_valid_name(refname.as_ptr()) == 1 } + } + + /// Get access to the underlying raw pointer. + pub fn raw(&self) -> *mut raw::git_reference { self.raw } + + /// Delete an existing reference. + /// + /// This method works for both direct and symbolic references. The reference + /// will be immediately removed on disk. + /// + /// This function will return an error if the reference has changed from the + /// time it was looked up. + pub fn delete(&mut self) -> Result<(), Error> { + unsafe { try_call!(raw::git_reference_delete(self.raw)); } + Ok(()) + } + + /// Check if a reference is a local branch. + pub fn is_branch(&self) -> bool { + unsafe { raw::git_reference_is_branch(&*self.raw) == 1 } + } + + /// Check if a reference is a note. + pub fn is_note(&self) -> bool { + unsafe { raw::git_reference_is_note(&*self.raw) == 1 } + } + + /// Check if a reference is a remote tracking branch + pub fn is_remote(&self) -> bool { + unsafe { raw::git_reference_is_remote(&*self.raw) == 1 } + } + + /// Check if a reference is a tag + pub fn is_tag(&self) -> bool { + unsafe { raw::git_reference_is_tag(&*self.raw) == 1 } + } + + /// Get the full name of a reference. + /// + /// Returns `None` if the name is not valid utf-8. + pub fn name(&self) -> Option<&str> { str::from_utf8(self.name_bytes()).ok() } + + /// Get the full name of a reference. + pub fn name_bytes(&self) -> &[u8] { + unsafe { ::opt_bytes(self, raw::git_reference_name(&*self.raw)).unwrap() } + } + + /// Get the full shorthand of a reference. + /// + /// This will transform the reference name into a name "human-readable" + /// version. If no shortname is appropriate, it will return the full name. 
+ /// + /// Returns `None` if the shorthand is not valid utf-8. + pub fn shorthand(&self) -> Option<&str> { + str::from_utf8(self.shorthand_bytes()).ok() + } + + /// Get the full shorthand of a reference. + pub fn shorthand_bytes(&self) -> &[u8] { + unsafe { + ::opt_bytes(self, raw::git_reference_shorthand(&*self.raw)).unwrap() + } + } + + /// Get the OID pointed to by a direct reference. + /// + /// Only available if the reference is direct (i.e. an object id reference, + /// not a symbolic one). + pub fn target(&self) -> Option { + unsafe { + Binding::from_raw_opt(raw::git_reference_target(&*self.raw)) + } + } + + /// Return the peeled OID target of this reference. + /// + /// This peeled OID only applies to direct references that point to a hard + /// Tag object: it is the result of peeling such Tag. + pub fn target_peel(&self) -> Option { + unsafe { + Binding::from_raw_opt(raw::git_reference_target_peel(&*self.raw)) + } + } + + /// Get full name to the reference pointed to by a symbolic reference. + /// + /// May return `None` if the reference is either not symbolic or not a + /// valid utf-8 string. + pub fn symbolic_target(&self) -> Option<&str> { + self.symbolic_target_bytes().and_then(|s| str::from_utf8(s).ok()) + } + + /// Get full name to the reference pointed to by a symbolic reference. + /// + /// Only available if the reference is symbolic. + pub fn symbolic_target_bytes(&self) -> Option<&[u8]> { + unsafe { ::opt_bytes(self, raw::git_reference_symbolic_target(&*self.raw)) } + } + + /// Resolve a symbolic reference to a direct reference. + /// + /// This method iteratively peels a symbolic reference until it resolves to + /// a direct reference to an OID. + /// + /// If a direct reference is passed as an argument, a copy of that + /// reference is returned. 
+ pub fn resolve(&self) -> Result, Error> { + let mut raw = 0 as *mut raw::git_reference; + unsafe { + try_call!(raw::git_reference_resolve(&mut raw, &*self.raw)); + Ok(Binding::from_raw(raw)) + } + } + + /// Peel a reference to an object + /// + /// This method recursively peels the reference until it reaches + /// an object of the specified type. + pub fn peel(&self, kind: ObjectType) -> Result, Error> { + let mut raw = 0 as *mut raw::git_object; + unsafe { + try_call!(raw::git_reference_peel(&mut raw, self.raw, kind)); + Ok(Binding::from_raw(raw)) + } + } + + /// Rename an existing reference. + /// + /// This method works for both direct and symbolic references. + /// + /// If the force flag is not enabled, and there's already a reference with + /// the given name, the renaming will fail. + pub fn rename(&mut self, new_name: &str, force: bool, + msg: &str) -> Result, Error> { + let mut raw = 0 as *mut raw::git_reference; + let new_name = try!(CString::new(new_name)); + let msg = try!(CString::new(msg)); + unsafe { + try_call!(raw::git_reference_rename(&mut raw, self.raw, new_name, + force, msg)); + Ok(Binding::from_raw(raw)) + } + } + + /// Conditionally create a new reference with the same name as the given + /// reference but a different OID target. The reference must be a direct + /// reference, otherwise this will fail. + /// + /// The new reference will be written to disk, overwriting the given + /// reference. 
+ pub fn set_target(&mut self, id: Oid, reflog_msg: &str) + -> Result, Error> { + let mut raw = 0 as *mut raw::git_reference; + let msg = try!(CString::new(reflog_msg)); + unsafe { + try_call!(raw::git_reference_set_target(&mut raw, self.raw, + id.raw(), msg)); + Ok(Binding::from_raw(raw)) + } + } + +} + +impl<'repo> PartialOrd for Reference<'repo> { + fn partial_cmp(&self, other: &Reference<'repo>) -> Option { + Some(self.cmp(other)) + } +} + +impl<'repo> Ord for Reference<'repo> { + fn cmp(&self, other: &Reference<'repo>) -> Ordering { + match unsafe { raw::git_reference_cmp(&*self.raw, &*other.raw) } { + 0 => Ordering::Equal, + n if n < 0 => Ordering::Less, + _ => Ordering::Greater, + } + } +} + +impl<'repo> PartialEq for Reference<'repo> { + fn eq(&self, other: &Reference<'repo>) -> bool { + self.cmp(other) == Ordering::Equal + } +} + +impl<'repo> Eq for Reference<'repo> {} + +impl<'repo> Binding for Reference<'repo> { + type Raw = *mut raw::git_reference; + unsafe fn from_raw(raw: *mut raw::git_reference) -> Reference<'repo> { + Reference { raw: raw, _marker: marker::PhantomData } + } + fn raw(&self) -> *mut raw::git_reference { self.raw } +} + +impl<'repo> Drop for Reference<'repo> { + fn drop(&mut self) { + unsafe { raw::git_reference_free(self.raw) } + } +} + +impl<'repo> References<'repo> { + /// Consumes a `References` iterator to create an iterator over just the + /// name of some references. + /// + /// This is more efficient if only the names are desired of references as + /// the references themselves don't have to be allocated and deallocated. + /// + /// The returned iterator will yield strings as opposed to a `Reference`. 
+ pub fn names(self) -> ReferenceNames<'repo> { + ReferenceNames { inner: self } + } +} + +impl<'repo> Binding for References<'repo> { + type Raw = *mut raw::git_reference_iterator; + unsafe fn from_raw(raw: *mut raw::git_reference_iterator) + -> References<'repo> { + References { raw: raw, _marker: marker::PhantomData } + } + fn raw(&self) -> *mut raw::git_reference_iterator { self.raw } +} + +impl<'repo> Iterator for References<'repo> { + type Item = Result, Error>; + fn next(&mut self) -> Option, Error>> { + let mut out = 0 as *mut raw::git_reference; + unsafe { + try_call_iter!(raw::git_reference_next(&mut out, self.raw)); + Some(Ok(Binding::from_raw(out))) + } + } +} + +impl<'repo> Drop for References<'repo> { + fn drop(&mut self) { + unsafe { raw::git_reference_iterator_free(self.raw) } + } +} + +impl<'repo> Iterator for ReferenceNames<'repo> { + type Item = Result<&'repo str, Error>; + fn next(&mut self) -> Option> { + let mut out = 0 as *const libc::c_char; + unsafe { + try_call_iter!(raw::git_reference_next_name(&mut out, + self.inner.raw)); + let bytes = ::opt_bytes(self, out).unwrap(); + let s = str::from_utf8(bytes).unwrap(); + Some(Ok(mem::transmute::<&str, &'repo str>(s))) + } + } +} + +#[cfg(test)] +mod tests { + use {Reference, ObjectType}; + + #[test] + fn smoke() { + assert!(Reference::is_valid_name("refs/foo")); + assert!(!Reference::is_valid_name("foo")); + } + + #[test] + fn smoke2() { + let (_td, repo) = ::test::repo_init(); + let mut head = repo.head().unwrap(); + assert!(head.is_branch()); + assert!(!head.is_remote()); + assert!(!head.is_tag()); + assert!(!head.is_note()); + + assert!(head == repo.head().unwrap()); + assert_eq!(head.name(), Some("refs/heads/master")); + + assert!(head == repo.find_reference("refs/heads/master").unwrap()); + assert_eq!(repo.refname_to_id("refs/heads/master").unwrap(), + head.target().unwrap()); + + assert!(head.symbolic_target().is_none()); + assert!(head.target_peel().is_none()); + + 
assert_eq!(head.shorthand(), Some("master")); + assert!(head.resolve().unwrap() == head); + + let mut tag1 = repo.reference("refs/tags/tag1", + head.target().unwrap(), + false, "test").unwrap(); + assert!(tag1.is_tag()); + + let peeled_commit = tag1.peel(ObjectType::Commit).unwrap(); + assert_eq!(ObjectType::Commit, peeled_commit.kind().unwrap()); + assert_eq!(tag1.target().unwrap(), peeled_commit.id()); + + tag1.delete().unwrap(); + + let mut sym1 = repo.reference_symbolic("refs/tags/tag1", + "refs/heads/master", false, + "test").unwrap(); + sym1.delete().unwrap(); + + { + assert!(repo.references().unwrap().count() == 1); + assert!(repo.references().unwrap().next().unwrap().unwrap() == head); + let mut names = repo.references().unwrap().names(); + assert_eq!(names.next().unwrap().unwrap(), "refs/heads/master"); + assert!(names.next().is_none()); + assert!(repo.references_glob("foo").unwrap().count() == 0); + assert!(repo.references_glob("refs/heads/*").unwrap().count() == 1); + } + + let mut head = head.rename("refs/foo", true, "test").unwrap(); + head.delete().unwrap(); + + } +} diff -Nru cargo-0.17.0/vendor/git2-0.6.4/src/reflog.rs cargo-0.19.0/vendor/git2-0.6.4/src/reflog.rs --- cargo-0.17.0/vendor/git2-0.6.4/src/reflog.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/git2-0.6.4/src/reflog.rs 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,167 @@ +use std::ops::Range; +use std::marker; +use std::str; +use libc::size_t; + +use {raw, signature, Oid, Error, Signature}; +use util::Binding; + +/// A reference log of a git repository. +pub struct Reflog { + raw: *mut raw::git_reflog, +} + +/// An entry inside the reflog of a repository +pub struct ReflogEntry<'reflog> { + raw: *const raw::git_reflog_entry, + _marker: marker::PhantomData<&'reflog Reflog>, +} + +/// An iterator over the entries inside of a reflog. +pub struct ReflogIter<'reflog> { + range: Range, + reflog: &'reflog Reflog, +} + +impl Reflog { + /// Add a new entry to the in-memory reflog. 
+ pub fn append(&mut self, new_oid: Oid, committer: &Signature, + msg: Option<&str>) -> Result<(), Error> { + let msg = try!(::opt_cstr(msg)); + unsafe { + try_call!(raw::git_reflog_append(self.raw, new_oid.raw(), + committer.raw(), msg)); + } + Ok(()) + } + + /// Remove an entry from the reflog by its index + /// + /// To ensure there's no gap in the log history, set rewrite_previous_entry + /// param value to `true`. When deleting entry n, member old_oid of entry + /// n-1 (if any) will be updated with the value of member new_oid of entry + /// n+1. + pub fn remove(&mut self, i: usize, rewrite_previous_entry: bool) + -> Result<(), Error> { + unsafe { + try_call!(raw::git_reflog_drop(self.raw, i as size_t, + rewrite_previous_entry)); + } + Ok(()) + } + + /// Lookup an entry by its index + /// + /// Requesting the reflog entry with an index of 0 (zero) will return the + /// most recently created entry. + pub fn get(&self, i: usize) -> Option { + unsafe { + let ptr = raw::git_reflog_entry_byindex(self.raw, i as size_t); + Binding::from_raw_opt(ptr) + } + } + + /// Get the number of log entries in a reflog + pub fn len(&self) -> usize { + unsafe { raw::git_reflog_entrycount(self.raw) as usize } + } + + /// Get an iterator to all entries inside of this reflog + pub fn iter(&self) -> ReflogIter { + ReflogIter { range: 0..self.len(), reflog: self } + } + + /// Write an existing in-memory reflog object back to disk using an atomic + /// file lock. 
+ pub fn write(&mut self) -> Result<(), Error> { + unsafe { try_call!(raw::git_reflog_write(self.raw)); } + Ok(()) + } +} + +impl Binding for Reflog { + type Raw = *mut raw::git_reflog; + + unsafe fn from_raw(raw: *mut raw::git_reflog) -> Reflog { + Reflog { raw: raw } + } + fn raw(&self) -> *mut raw::git_reflog { self.raw } +} + +impl Drop for Reflog { + fn drop(&mut self) { + unsafe { raw::git_reflog_free(self.raw) } + } +} + +impl<'reflog> ReflogEntry<'reflog> { + /// Get the committer of this entry + pub fn committer(&self) -> Signature { + unsafe { + let ptr = raw::git_reflog_entry_committer(self.raw); + signature::from_raw_const(self, ptr) + } + } + + /// Get the new oid + pub fn id_new(&self) -> Oid { + unsafe { Binding::from_raw(raw::git_reflog_entry_id_new(self.raw)) } + } + + /// Get the old oid + pub fn id_old(&self) -> Oid { + unsafe { Binding::from_raw(raw::git_reflog_entry_id_new(self.raw)) } + } + + /// Get the log message, returning `None` on invalid UTF-8. + pub fn message(&self) -> Option<&str> { + self.message_bytes().and_then(|s| str::from_utf8(s).ok()) + } + + /// Get the log message as a byte array. 
+ pub fn message_bytes(&self) -> Option<&[u8]> { + unsafe { + ::opt_bytes(self, raw::git_reflog_entry_message(self.raw)) + } + } +} + +impl<'reflog> Binding for ReflogEntry<'reflog> { + type Raw = *const raw::git_reflog_entry; + + unsafe fn from_raw(raw: *const raw::git_reflog_entry) -> ReflogEntry<'reflog> { + ReflogEntry { raw: raw, _marker: marker::PhantomData } + } + fn raw(&self) -> *const raw::git_reflog_entry { self.raw } +} + +impl<'reflog> Iterator for ReflogIter<'reflog> { + type Item = ReflogEntry<'reflog>; + fn next(&mut self) -> Option> { + self.range.next().and_then(|i| self.reflog.get(i)) + } + fn size_hint(&self) -> (usize, Option) { self.range.size_hint() } +} +impl<'reflog> DoubleEndedIterator for ReflogIter<'reflog> { + fn next_back(&mut self) -> Option> { + self.range.next_back().and_then(|i| self.reflog.get(i)) + } +} +impl<'reflog> ExactSizeIterator for ReflogIter<'reflog> {} + +#[cfg(test)] +mod tests { + #[test] + fn smoke() { + let (_td, repo) = ::test::repo_init(); + let mut reflog = repo.reflog("HEAD").unwrap(); + assert_eq!(reflog.iter().len(), 1); + reflog.write().unwrap(); + + let entry = reflog.iter().next().unwrap(); + assert!(entry.message().is_some()); + + repo.reflog_rename("HEAD", "refs/heads/foo").unwrap(); + repo.reflog_delete("refs/heads/foo").unwrap(); + } +} diff -Nru cargo-0.17.0/vendor/git2-0.6.4/src/refspec.rs cargo-0.19.0/vendor/git2-0.6.4/src/refspec.rs --- cargo-0.17.0/vendor/git2-0.6.4/src/refspec.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/git2-0.6.4/src/refspec.rs 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,89 @@ +use std::ffi::CString; +use std::marker; +use std::str; + +use {raw, Direction}; +use util::Binding; + +/// A structure to represent a git [refspec][1]. +/// +/// Refspecs are currently mainly accessed/created through a `Remote`. 
+/// +/// [1]: http://git-scm.com/book/en/Git-Internals-The-Refspec +pub struct Refspec<'remote> { + raw: *const raw::git_refspec, + _marker: marker::PhantomData<&'remote raw::git_remote>, +} + +impl<'remote> Refspec<'remote> { + /// Get the refspec's direction. + pub fn direction(&self) -> Direction { + match unsafe { raw::git_refspec_direction(self.raw) } { + raw::GIT_DIRECTION_FETCH => Direction::Fetch, + raw::GIT_DIRECTION_PUSH => Direction::Push, + n => panic!("unknown refspec direction: {}", n), + } + } + + /// Get the destination specifier. + /// + /// If the destination is not utf-8, None is returned. + pub fn dst(&self) -> Option<&str> { + str::from_utf8(self.dst_bytes()).ok() + } + + /// Get the destination specifier, in bytes. + pub fn dst_bytes(&self) -> &[u8] { + unsafe { ::opt_bytes(self, raw::git_refspec_dst(self.raw)).unwrap() } + } + + /// Check if a refspec's destination descriptor matches a reference + pub fn dst_matches(&self, refname: &str) -> bool { + let refname = CString::new(refname).unwrap(); + unsafe { raw::git_refspec_dst_matches(self.raw, refname.as_ptr()) == 1 } + } + + /// Get the source specifier. + /// + /// If the source is not utf-8, None is returned. + pub fn src(&self) -> Option<&str> { + str::from_utf8(self.src_bytes()).ok() + } + + /// Get the source specifier, in bytes. + pub fn src_bytes(&self) -> &[u8] { + unsafe { ::opt_bytes(self, raw::git_refspec_src(self.raw)).unwrap() } + } + + /// Check if a refspec's source descriptor matches a reference + pub fn src_matches(&self, refname: &str) -> bool { + let refname = CString::new(refname).unwrap(); + unsafe { raw::git_refspec_src_matches(self.raw, refname.as_ptr()) == 1 } + } + + /// Get the force update setting. + pub fn is_force(&self) -> bool { + unsafe { raw::git_refspec_force(self.raw) == 1 } + } + + /// Get the refspec's string. + /// + /// Returns None if the string is not valid utf8. 
+ pub fn str(&self) -> Option<&str> { + str::from_utf8(self.bytes()).ok() + } + + /// Get the refspec's string as a byte array + pub fn bytes(&self) -> &[u8] { + unsafe { ::opt_bytes(self, raw::git_refspec_string(self.raw)).unwrap() } + } +} + +impl<'remote> Binding for Refspec<'remote> { + type Raw = *const raw::git_refspec; + + unsafe fn from_raw(raw: *const raw::git_refspec) -> Refspec<'remote> { + Refspec { raw: raw, _marker: marker::PhantomData } + } + fn raw(&self) -> *const raw::git_refspec { self.raw } +} diff -Nru cargo-0.17.0/vendor/git2-0.6.4/src/remote_callbacks.rs cargo-0.19.0/vendor/git2-0.6.4/src/remote_callbacks.rs --- cargo-0.17.0/vendor/git2-0.6.4/src/remote_callbacks.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/git2-0.6.4/src/remote_callbacks.rs 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,382 @@ +use std::ffi::CStr; +use std::marker; +use std::mem; +use std::slice; +use std::str; +use libc::{c_void, c_int, c_char, c_uint}; + +use {raw, panic, Error, Cred, CredentialType, Oid}; +use cert::Cert; +use util::Binding; + +/// A structure to contain the callbacks which are invoked when a repository is +/// being updated or downloaded. +/// +/// These callbacks are used to manage facilities such as authentication, +/// transfer progress, etc. +pub struct RemoteCallbacks<'a> { + progress: Option>>, + credentials: Option>>, + sideband_progress: Option>>, + update_tips: Option>>, + certificate_check: Option>>, + push_update_reference: Option>>, +} + +/// Struct representing the progress by an in-flight transfer. +pub struct Progress<'a> { + raw: ProgressState, + _marker: marker::PhantomData<&'a raw::git_transfer_progress>, +} + +enum ProgressState { + Borrowed(*const raw::git_transfer_progress), + Owned(raw::git_transfer_progress), +} + +/// Callback used to acquire credentials for when a remote is fetched. +/// +/// * `url` - the resource for which the credentials are required. 
+/// * `username_from_url` - the username that was embedded in the url, or `None` +/// if it was not included. +/// * `allowed_types` - a bitmask stating which cred types are ok to return. +pub type Credentials<'a> = FnMut(&str, Option<&str>, CredentialType) + -> Result + 'a; + +/// Callback to be invoked while a transfer is in progress. +/// +/// This callback will be periodically called with updates to the progress of +/// the transfer so far. The return value indicates whether the transfer should +/// continue. A return value of `false` will cancel the transfer. +/// +/// * `progress` - the progress being made so far. +pub type TransferProgress<'a> = FnMut(Progress) -> bool + 'a; + +/// Callback for receiving messages delivered by the transport. +/// +/// The return value indicates whether the network operation should continue. +pub type TransportMessage<'a> = FnMut(&[u8]) -> bool + 'a; + +/// Callback for whenever a reference is updated locally. +pub type UpdateTips<'a> = FnMut(&str, Oid, Oid) -> bool + 'a; + +/// Callback for a custom certificate check. +/// +/// The first argument is the certificate receved on the connection. +/// Certificates are typically either an SSH or X509 certificate. +/// +/// The second argument is the hostname for the connection is passed as the last +/// argument. +pub type CertificateCheck<'a> = FnMut(&Cert, &str) -> bool + 'a; + +/// Callback for each updated reference on push. +/// +/// The first argument here is the `refname` of the reference, and the second is +/// the status message sent by a server. If the status is `Some` then the update +/// was rejected by the remote server with a reason why. 
+pub type PushUpdateReference<'a> = FnMut(&str, Option<&str>) -> Result<(), Error> + 'a; + +impl<'a> RemoteCallbacks<'a> { + /// Creates a new set of empty callbacks + pub fn new() -> RemoteCallbacks<'a> { + RemoteCallbacks { + credentials: None, + progress: None, + sideband_progress: None, + update_tips: None, + certificate_check: None, + push_update_reference: None, + } + } + + /// The callback through which to fetch credentials if required. + pub fn credentials(&mut self, cb: F) -> &mut RemoteCallbacks<'a> + where F: FnMut(&str, Option<&str>, CredentialType) + -> Result + 'a + { + self.credentials = Some(Box::new(cb) as Box>); + self + } + + /// The callback through which progress is monitored. + pub fn transfer_progress(&mut self, cb: F) -> &mut RemoteCallbacks<'a> + where F: FnMut(Progress) -> bool + 'a { + self.progress = Some(Box::new(cb) as Box>); + self + } + + /// Textual progress from the remote. + /// + /// Text sent over the progress side-band will be passed to this function + /// (this is the 'counting objects' output. + pub fn sideband_progress(&mut self, cb: F) -> &mut RemoteCallbacks<'a> + where F: FnMut(&[u8]) -> bool + 'a { + self.sideband_progress = Some(Box::new(cb) as Box>); + self + } + + /// Each time a reference is updated locally, the callback will be called + /// with information about it. + pub fn update_tips(&mut self, cb: F) -> &mut RemoteCallbacks<'a> + where F: FnMut(&str, Oid, Oid) -> bool + 'a { + self.update_tips = Some(Box::new(cb) as Box>); + self + } + + /// If certificate verification fails, then this callback will be invoked to + /// let the caller make the final decision of whether to allow the + /// connection to proceed. + pub fn certificate_check(&mut self, cb: F) -> &mut RemoteCallbacks<'a> + where F: FnMut(&Cert, &str) -> bool + 'a + { + self.certificate_check = Some(Box::new(cb) as Box>); + self + } + + /// Set a callback to get invoked for each updated reference on a push. 
+ /// + /// The first argument to the callback is the name of the reference and the + /// second is a status message sent by the server. If the status is `Some` + /// then the push was rejected. + pub fn push_update_reference(&mut self, cb: F) -> &mut RemoteCallbacks<'a> + where F: FnMut(&str, Option<&str>) -> Result<(), Error> + 'a, + { + self.push_update_reference = Some(Box::new(cb) as Box>); + self + } +} + +impl<'a> Binding for RemoteCallbacks<'a> { + type Raw = raw::git_remote_callbacks; + unsafe fn from_raw(_raw: raw::git_remote_callbacks) -> RemoteCallbacks<'a> { + panic!("unimplemented"); + } + + fn raw(&self) -> raw::git_remote_callbacks { + unsafe { + let mut callbacks: raw::git_remote_callbacks = mem::zeroed(); + assert_eq!(raw::git_remote_init_callbacks(&mut callbacks, + raw::GIT_REMOTE_CALLBACKS_VERSION), 0); + if self.progress.is_some() { + let f: raw::git_transfer_progress_cb = transfer_progress_cb; + callbacks.transfer_progress = Some(f); + } + if self.credentials.is_some() { + let f: raw::git_cred_acquire_cb = credentials_cb; + callbacks.credentials = Some(f); + } + if self.sideband_progress.is_some() { + let f: raw::git_transport_message_cb = sideband_progress_cb; + callbacks.sideband_progress = Some(f); + } + if self.certificate_check.is_some() { + let f: raw::git_transport_certificate_check_cb = + certificate_check_cb; + callbacks.certificate_check = Some(f); + } + if self.push_update_reference.is_some() { + let f: extern fn(_, _, _) -> c_int = push_update_reference_cb; + callbacks.push_update_reference = Some(f); + } + if self.update_tips.is_some() { + let f: extern fn(*const c_char, *const raw::git_oid, + *const raw::git_oid, *mut c_void) -> c_int + = update_tips_cb; + callbacks.update_tips = Some(f); + } + callbacks.payload = self as *const _ as *mut _; + return callbacks; + } + } +} + +impl<'a> Progress<'a> { + /// Number of objects in the packfile being downloaded + pub fn total_objects(&self) -> usize { + unsafe { 
(*self.raw()).total_objects as usize } + } + /// Received objects that have been hashed + pub fn indexed_objects(&self) -> usize { + unsafe { (*self.raw()).indexed_objects as usize } + } + /// Objects which have been downloaded + pub fn received_objects(&self) -> usize { + unsafe { (*self.raw()).received_objects as usize } + } + /// Locally-available objects that have been injected in order to fix a thin + /// pack. + pub fn local_objects(&self) -> usize { + unsafe { (*self.raw()).local_objects as usize } + } + /// Number of deltas in the packfile being downloaded + pub fn total_deltas(&self) -> usize { + unsafe { (*self.raw()).total_deltas as usize } + } + /// Received deltas that have been hashed. + pub fn indexed_deltas(&self) -> usize { + unsafe { (*self.raw()).indexed_deltas as usize } + } + /// Size of the packfile received up to now + pub fn received_bytes(&self) -> usize { + unsafe { (*self.raw()).received_bytes as usize } + } + + /// Convert this to an owned version of `Progress`. 
+ pub fn to_owned(&self) -> Progress<'static> { + Progress { + raw: ProgressState::Owned(unsafe { *self.raw() }), + _marker: marker::PhantomData, + } + } +} + +impl<'a> Binding for Progress<'a> { + type Raw = *const raw::git_transfer_progress; + unsafe fn from_raw(raw: *const raw::git_transfer_progress) + -> Progress<'a> { + Progress { + raw: ProgressState::Borrowed(raw), + _marker: marker::PhantomData, + } + } + + fn raw(&self) -> *const raw::git_transfer_progress { + match self.raw { + ProgressState::Borrowed(raw) => raw, + ProgressState::Owned(ref raw) => raw as *const _, + } + } +} + +extern fn credentials_cb(ret: *mut *mut raw::git_cred, + url: *const c_char, + username_from_url: *const c_char, + allowed_types: c_uint, + payload: *mut c_void) -> c_int { + unsafe { + let ok = panic::wrap(|| { + let payload = &mut *(payload as *mut RemoteCallbacks); + let callback = try!(payload.credentials.as_mut() + .ok_or(raw::GIT_PASSTHROUGH as c_int)); + *ret = 0 as *mut raw::git_cred; + let url = try!(str::from_utf8(CStr::from_ptr(url).to_bytes()) + .map_err(|_| raw::GIT_PASSTHROUGH as c_int)); + let username_from_url = match ::opt_bytes(&url, username_from_url) { + Some(username) => { + Some(try!(str::from_utf8(username) + .map_err(|_| raw::GIT_PASSTHROUGH as c_int))) + } + None => None, + }; + + let cred_type = CredentialType::from_bits_truncate(allowed_types as u32); + + callback(url, username_from_url, cred_type).map_err(|e| { + e.raw_code() as c_int + }) + }); + match ok { + Some(Ok(cred)) => { + // Turns out it's a memory safety issue if we pass through any + // and all credentials into libgit2 + if allowed_types & (cred.credtype() as c_uint) != 0 { + *ret = cred.unwrap(); + 0 + } else { + raw::GIT_PASSTHROUGH as c_int + } + } + Some(Err(e)) => e, + None => -1, + } + } +} + +extern fn transfer_progress_cb(stats: *const raw::git_transfer_progress, + payload: *mut c_void) -> c_int { + let ok = panic::wrap(|| unsafe { + let payload = &mut *(payload as *mut 
RemoteCallbacks); + let callback = match payload.progress { + Some(ref mut c) => c, + None => return true, + }; + let progress = Binding::from_raw(stats); + callback(progress) + }); + if ok == Some(true) {0} else {-1} +} + +extern fn sideband_progress_cb(str: *const c_char, + len: c_int, + payload: *mut c_void) -> c_int { + let ok = panic::wrap(|| unsafe { + let payload = &mut *(payload as *mut RemoteCallbacks); + let callback = match payload.sideband_progress { + Some(ref mut c) => c, + None => return true, + }; + let buf = slice::from_raw_parts(str as *const u8, len as usize); + callback(buf) + }); + if ok == Some(true) {0} else {-1} +} + +extern fn update_tips_cb(refname: *const c_char, + a: *const raw::git_oid, + b: *const raw::git_oid, + data: *mut c_void) -> c_int { + let ok = panic::wrap(|| unsafe { + let payload = &mut *(data as *mut RemoteCallbacks); + let callback = match payload.update_tips { + Some(ref mut c) => c, + None => return true, + }; + let refname = str::from_utf8(CStr::from_ptr(refname).to_bytes()) + .unwrap(); + let a = Binding::from_raw(a); + let b = Binding::from_raw(b); + callback(refname, a, b) + }); + if ok == Some(true) {0} else {-1} +} + +extern fn certificate_check_cb(cert: *mut raw::git_cert, + _valid: c_int, + hostname: *const c_char, + data: *mut c_void) -> c_int { + let ok = panic::wrap(|| unsafe { + let payload = &mut *(data as *mut RemoteCallbacks); + let callback = match payload.certificate_check { + Some(ref mut c) => c, + None => return true, + }; + let cert = Binding::from_raw(cert); + let hostname = str::from_utf8(CStr::from_ptr(hostname).to_bytes()) + .unwrap(); + callback(&cert, hostname) + }); + if ok == Some(true) {0} else {-1} +} + +extern fn push_update_reference_cb(refname: *const c_char, + status: *const c_char, + data: *mut c_void) -> c_int { + panic::wrap(|| unsafe { + let payload = &mut *(data as *mut RemoteCallbacks); + let callback = match payload.push_update_reference { + Some(ref mut c) => c, + None => return 
0, + }; + let refname = str::from_utf8(CStr::from_ptr(refname).to_bytes()) + .unwrap(); + let status = if status.is_null() { + None + } else { + Some(str::from_utf8(CStr::from_ptr(status).to_bytes()).unwrap()) + }; + match callback(refname, status) { + Ok(()) => 0, + Err(e) => e.raw_code(), + } + }).unwrap_or(-1) +} diff -Nru cargo-0.17.0/vendor/git2-0.6.4/src/remote.rs cargo-0.19.0/vendor/git2-0.6.4/src/remote.rs --- cargo-0.17.0/vendor/git2-0.6.4/src/remote.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/git2-0.6.4/src/remote.rs 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,604 @@ +use std::ffi::CString; +use std::ops::Range; +use std::marker; +use std::mem; +use std::slice; +use std::str; +use libc; + +use {raw, Direction, Error, Refspec, Oid, FetchPrune, ProxyOptions}; +use {RemoteCallbacks, Progress, Repository, AutotagOption}; +use util::Binding; + +/// A structure representing a [remote][1] of a git repository. +/// +/// [1]: http://git-scm.com/book/en/Git-Basics-Working-with-Remotes +/// +/// The lifetime is the lifetime of the repository that it is attached to. The +/// remote is used to manage fetches and pushes as well as refspecs. +pub struct Remote<'repo> { + raw: *mut raw::git_remote, + _marker: marker::PhantomData<&'repo Repository>, +} + +/// An iterator over the refspecs that a remote contains. +pub struct Refspecs<'remote> { + range: Range, + remote: &'remote Remote<'remote>, +} + +/// Description of a reference advertised bya remote server, given out on calls +/// to `list`. +pub struct RemoteHead<'remote> { + raw: *const raw::git_remote_head, + _marker: marker::PhantomData<&'remote str>, +} + +/// Options which can be specified to various fetch operations. +pub struct FetchOptions<'cb> { + callbacks: Option>, + proxy: Option>, + prune: FetchPrune, + update_fetchhead: bool, + download_tags: AutotagOption, +} + +/// Options to control the behavior of a git push. 
+pub struct PushOptions<'cb> { + callbacks: Option>, + proxy: Option>, + pb_parallelism: u32, +} + +impl<'repo> Remote<'repo> { + /// Ensure the remote name is well-formed. + pub fn is_valid_name(remote_name: &str) -> bool { + ::init(); + let remote_name = CString::new(remote_name).unwrap(); + unsafe { raw::git_remote_is_valid_name(remote_name.as_ptr()) == 1 } + } + + /// Get the remote's name. + /// + /// Returns `None` if this remote has not yet been named or if the name is + /// not valid utf-8 + pub fn name(&self) -> Option<&str> { + self.name_bytes().and_then(|s| str::from_utf8(s).ok()) + } + + /// Get the remote's name, in bytes. + /// + /// Returns `None` if this remote has not yet been named + pub fn name_bytes(&self) -> Option<&[u8]> { + unsafe { ::opt_bytes(self, raw::git_remote_name(&*self.raw)) } + } + + /// Get the remote's url. + /// + /// Returns `None` if the url is not valid utf-8 + pub fn url(&self) -> Option<&str> { + str::from_utf8(self.url_bytes()).ok() + } + + /// Get the remote's url as a byte array. + pub fn url_bytes(&self) -> &[u8] { + unsafe { ::opt_bytes(self, raw::git_remote_url(&*self.raw)).unwrap() } + } + + /// Get the remote's pushurl. + /// + /// Returns `None` if the pushurl is not valid utf-8 + pub fn pushurl(&self) -> Option<&str> { + self.pushurl_bytes().and_then(|s| str::from_utf8(s).ok()) + } + + /// Get the remote's pushurl as a byte array. + pub fn pushurl_bytes(&self) -> Option<&[u8]> { + unsafe { ::opt_bytes(self, raw::git_remote_pushurl(&*self.raw)) } + } + + /// Open a connection to a remote. + pub fn connect(&mut self, dir: Direction) -> Result<(), Error> { + // TODO: can callbacks be exposed safely? 
+ unsafe { + try_call!(raw::git_remote_connect(self.raw, dir, + 0 as *const _, + 0 as *const _, + 0 as *const _)); + } + Ok(()) + } + + /// Check whether the remote is connected + pub fn connected(&mut self) -> bool { + unsafe { raw::git_remote_connected(self.raw) == 1 } + } + + /// Disconnect from the remote + pub fn disconnect(&mut self) { + unsafe { raw::git_remote_disconnect(self.raw) } + } + + /// Download and index the packfile + /// + /// Connect to the remote if it hasn't been done yet, negotiate with the + /// remote git which objects are missing, download and index the packfile. + /// + /// The .idx file will be created and both it and the packfile with be + /// renamed to their final name. + /// + /// The `specs` argument is a list of refspecs to use for this negotiation + /// and download. Use an empty array to use the base refspecs. + pub fn download(&mut self, specs: &[&str], opts: Option<&mut FetchOptions>) + -> Result<(), Error> { + let (_a, _b, arr) = try!(::util::iter2cstrs(specs.iter())); + let raw = opts.map(|o| o.raw()); + unsafe { + try_call!(raw::git_remote_download(self.raw, &arr, raw.as_ref())); + } + Ok(()) + } + + /// Get the number of refspecs for a remote + pub fn refspecs<'a>(&'a self) -> Refspecs<'a> { + let cnt = unsafe { raw::git_remote_refspec_count(&*self.raw) as usize }; + Refspecs { range: 0..cnt, remote: self } + } + + /// Get the `nth` refspec from this remote. + /// + /// The `refspecs` iterator can be used to iterate over all refspecs. + pub fn get_refspec(&self, i: usize) -> Option> { + unsafe { + let ptr = raw::git_remote_get_refspec(&*self.raw, + i as libc::size_t); + Binding::from_raw_opt(ptr) + } + } + + /// Download new data and update tips + /// + /// Convenience function to connect to a remote, download the data, + /// disconnect and update the remote-tracking branches. 
+ pub fn fetch(&mut self, + refspecs: &[&str], + opts: Option<&mut FetchOptions>, + reflog_msg: Option<&str>) -> Result<(), Error> { + let (_a, _b, arr) = try!(::util::iter2cstrs(refspecs.iter())); + let msg = try!(::opt_cstr(reflog_msg)); + let raw = opts.map(|o| o.raw()); + unsafe { + try_call!(raw::git_remote_fetch(self.raw, &arr, raw.as_ref(), msg)); + } + Ok(()) + } + + /// Update the tips to the new state + pub fn update_tips(&mut self, + callbacks: Option<&mut RemoteCallbacks>, + update_fetchhead: bool, + download_tags: AutotagOption, + msg: Option<&str>) -> Result<(), Error> { + let msg = try!(::opt_cstr(msg)); + let cbs = callbacks.map(|cb| cb.raw()); + unsafe { + try_call!(raw::git_remote_update_tips(self.raw, cbs.as_ref(), + update_fetchhead, + download_tags, msg)); + } + Ok(()) + } + + /// Perform a push + /// + /// Perform all the steps for a push. If no refspecs are passed then the + /// configured refspecs will be used. + /// + /// Note that you'll likely want to use `RemoteCallbacks` and set + /// `push_update_reference` to test whether all the references were pushed + /// successfully. + pub fn push(&mut self, + refspecs: &[&str], + opts: Option<&mut PushOptions>) -> Result<(), Error> { + let (_a, _b, arr) = try!(::util::iter2cstrs(refspecs.iter())); + let raw = opts.map(|o| o.raw()); + unsafe { + try_call!(raw::git_remote_push(self.raw, &arr, raw.as_ref())); + } + Ok(()) + } + + /// Get the statistics structure that is filled in by the fetch operation. + pub fn stats(&self) -> Progress { + unsafe { + Binding::from_raw(raw::git_remote_stats(self.raw)) + } + } + + /// Get the remote repository's reference advertisement list. + /// + /// Get the list of references with which the server responds to a new + /// connection. + /// + /// The remote (or more exactly its transport) must have connected to the + /// remote repository. 
This list is available as soon as the connection to + /// the remote is initiated and it remains available after disconnecting. + pub fn list(&self) -> Result<&[RemoteHead], Error> { + let mut size = 0; + let mut base = 0 as *mut _; + unsafe { + try_call!(raw::git_remote_ls(&mut base, &mut size, self.raw)); + assert_eq!(mem::size_of::(), + mem::size_of::<*const raw::git_remote_head>()); + let slice = slice::from_raw_parts(base as *const _, size as usize); + Ok(mem::transmute::<&[*const raw::git_remote_head], + &[RemoteHead]>(slice)) + } + } +} + +impl<'repo> Clone for Remote<'repo> { + fn clone(&self) -> Remote<'repo> { + let mut ret = 0 as *mut raw::git_remote; + let rc = unsafe { call!(raw::git_remote_dup(&mut ret, self.raw)) }; + assert_eq!(rc, 0); + Remote { + raw: ret, + _marker: marker::PhantomData, + } + } +} + +impl<'repo> Binding for Remote<'repo> { + type Raw = *mut raw::git_remote; + + unsafe fn from_raw(raw: *mut raw::git_remote) -> Remote<'repo> { + Remote { + raw: raw, + _marker: marker::PhantomData, + } + } + fn raw(&self) -> *mut raw::git_remote { self.raw } +} + +impl<'repo> Drop for Remote<'repo> { + fn drop(&mut self) { + unsafe { raw::git_remote_free(self.raw) } + } +} + +impl<'repo> Iterator for Refspecs<'repo> { + type Item = Refspec<'repo>; + fn next(&mut self) -> Option> { + self.range.next().and_then(|i| self.remote.get_refspec(i)) + } + fn size_hint(&self) -> (usize, Option) { self.range.size_hint() } +} +impl<'repo> DoubleEndedIterator for Refspecs<'repo> { + fn next_back(&mut self) -> Option> { + self.range.next_back().and_then(|i| self.remote.get_refspec(i)) + } +} +impl<'repo> ExactSizeIterator for Refspecs<'repo> {} + +#[allow(missing_docs)] // not documented in libgit2 :( +impl<'remote> RemoteHead<'remote> { + /// Flag if this is available locally. 
+ pub fn is_local(&self) -> bool { + unsafe { (*self.raw).local != 0 } + } + + pub fn oid(&self) -> Oid { + unsafe { Binding::from_raw(&(*self.raw).oid as *const _) } + } + pub fn loid(&self) -> Oid { + unsafe { Binding::from_raw(&(*self.raw).loid as *const _) } + } + + pub fn name(&self) -> &str { + let b = unsafe { ::opt_bytes(self, (*self.raw).name).unwrap() }; + str::from_utf8(b).unwrap() + } + + pub fn symref_target(&self) -> Option<&str> { + let b = unsafe { ::opt_bytes(self, (*self.raw).symref_target) }; + b.map(|b| str::from_utf8(b).unwrap()) + } +} + +impl<'cb> FetchOptions<'cb> { + /// Creates a new blank set of fetch options + pub fn new() -> FetchOptions<'cb> { + FetchOptions { + callbacks: None, + proxy: None, + prune: FetchPrune::Unspecified, + update_fetchhead: true, + download_tags: AutotagOption::Unspecified, + } + } + + /// Set the callbacks to use for the fetch operation. + pub fn remote_callbacks(&mut self, cbs: RemoteCallbacks<'cb>) -> &mut Self { + self.callbacks = Some(cbs); + self + } + + /// Set the proxy options to use for the fetch operation. + pub fn proxy_options(&mut self, opts: ProxyOptions<'cb>) -> &mut Self { + self.proxy = Some(opts); + self + } + + /// Set whether to perform a prune after the fetch. + pub fn prune(&mut self, prune: FetchPrune) -> &mut Self { + self.prune = prune; + self + } + + /// Set whether to write the results to FETCH_HEAD. + /// + /// Defaults to `true`. + pub fn update_fetchhead(&mut self, update: bool) -> &mut Self { + self.update_fetchhead = update; + self + } + + /// Set how to behave regarding tags on the remote, such as auto-downloading + /// tags for objects we're downloading or downloading all of them. + /// + /// The default is to auto-follow tags. 
+ pub fn download_tags(&mut self, opt: AutotagOption) -> &mut Self { + self.download_tags = opt; + self + } +} + +impl<'cb> Binding for FetchOptions<'cb> { + type Raw = raw::git_fetch_options; + + unsafe fn from_raw(_raw: raw::git_fetch_options) -> FetchOptions<'cb> { + panic!("unimplemented"); + } + fn raw(&self) -> raw::git_fetch_options { + raw::git_fetch_options { + version: 1, + callbacks: self.callbacks.as_ref().map(|m| m.raw()) + .unwrap_or_else(|| RemoteCallbacks::new().raw()), + proxy_opts: self.proxy.as_ref().map(|m| m.raw()) + .unwrap_or_else(|| ProxyOptions::new().raw()), + prune: ::call::convert(&self.prune), + update_fetchhead: ::call::convert(&self.update_fetchhead), + download_tags: ::call::convert(&self.download_tags), + // TODO: expose this as a builder option + custom_headers: raw::git_strarray { + count: 0, + strings: 0 as *mut _, + }, + } + } +} + +impl<'cb> PushOptions<'cb> { + /// Creates a new blank set of push options + pub fn new() -> PushOptions<'cb> { + PushOptions { + callbacks: None, + proxy: None, + pb_parallelism: 1, + } + } + + /// Set the callbacks to use for the fetch operation. + pub fn remote_callbacks(&mut self, cbs: RemoteCallbacks<'cb>) -> &mut Self { + self.callbacks = Some(cbs); + self + } + + /// Set the proxy options to use for the fetch operation. + pub fn proxy_options(&mut self, opts: ProxyOptions<'cb>) -> &mut Self { + self.proxy = Some(opts); + self + } + + /// If the transport being used to push to the remote requires the creation + /// of a pack file, this controls the number of worker threads used by the + /// packbuilder when creating that pack file to be sent to the remote. + /// + /// if set to 0 the packbuilder will auto-detect the number of threads to + /// create, and the default value is 1. 
+ pub fn packbuilder_parallelism(&mut self, parallel: u32) -> &mut Self { + self.pb_parallelism = parallel; + self + } +} + +impl<'cb> Binding for PushOptions<'cb> { + type Raw = raw::git_push_options; + + unsafe fn from_raw(_raw: raw::git_push_options) -> PushOptions<'cb> { + panic!("unimplemented"); + } + fn raw(&self) -> raw::git_push_options { + raw::git_push_options { + version: 1, + callbacks: self.callbacks.as_ref().map(|m| m.raw()) + .unwrap_or(RemoteCallbacks::new().raw()), + proxy_opts: self.proxy.as_ref().map(|m| m.raw()) + .unwrap_or_else(|| ProxyOptions::new().raw()), + pb_parallelism: self.pb_parallelism as libc::c_uint, + // TODO: expose this as a builder option + custom_headers: raw::git_strarray { + count: 0, + strings: 0 as *mut _, + }, + } + } +} + +#[cfg(test)] +mod tests { + use std::cell::Cell; + use tempdir::TempDir; + use {Repository, Remote, RemoteCallbacks, Direction, FetchOptions}; + use {AutotagOption, PushOptions}; + + #[test] + fn smoke() { + let (td, repo) = ::test::repo_init(); + t!(repo.remote("origin", "/path/to/nowhere")); + drop(repo); + + let repo = t!(Repository::init(td.path())); + let origin = t!(repo.find_remote("origin")); + assert_eq!(origin.name(), Some("origin")); + assert_eq!(origin.url(), Some("/path/to/nowhere")); + assert_eq!(origin.pushurl(), None); + + t!(repo.remote_set_url("origin", "/path/to/elsewhere")); + t!(repo.remote_set_pushurl("origin", Some("/path/to/elsewhere"))); + + let stats = origin.stats(); + assert_eq!(stats.total_objects(), 0); + } + + #[test] + fn create_remote() { + let td = TempDir::new("test").unwrap(); + let remote = td.path().join("remote"); + Repository::init_bare(&remote).unwrap(); + + let (_td, repo) = ::test::repo_init(); + let url = if cfg!(unix) { + format!("file://{}", remote.display()) + } else { + format!("file:///{}", remote.display().to_string() + .replace("\\", "/")) + }; + + let mut origin = repo.remote("origin", &url).unwrap(); + assert_eq!(origin.name(), Some("origin")); + 
assert_eq!(origin.url(), Some(&url[..])); + assert_eq!(origin.pushurl(), None); + + { + let mut specs = origin.refspecs(); + let spec = specs.next().unwrap(); + assert!(specs.next().is_none()); + assert_eq!(spec.str(), Some("+refs/heads/*:refs/remotes/origin/*")); + assert_eq!(spec.dst(), Some("refs/remotes/origin/*")); + assert_eq!(spec.src(), Some("refs/heads/*")); + assert!(spec.is_force()); + } + assert!(origin.refspecs().next_back().is_some()); + { + let remotes = repo.remotes().unwrap(); + assert_eq!(remotes.len(), 1); + assert_eq!(remotes.get(0), Some("origin")); + assert_eq!(remotes.iter().count(), 1); + assert_eq!(remotes.iter().next().unwrap(), Some("origin")); + } + + origin.connect(Direction::Push).unwrap(); + assert!(origin.connected()); + origin.disconnect(); + + origin.connect(Direction::Fetch).unwrap(); + assert!(origin.connected()); + origin.download(&[], None).unwrap(); + origin.disconnect(); + + origin.fetch(&[], None, None).unwrap(); + origin.fetch(&[], None, Some("foo")).unwrap(); + origin.update_tips(None, true, AutotagOption::Unspecified, None).unwrap(); + origin.update_tips(None, true, AutotagOption::All, Some("foo")).unwrap(); + + t!(repo.remote_add_fetch("origin", "foo")); + t!(repo.remote_add_fetch("origin", "bar")); + } + + #[test] + fn rename_remote() { + let (_td, repo) = ::test::repo_init(); + repo.remote("origin", "foo").unwrap(); + repo.remote_rename("origin", "foo").unwrap(); + repo.remote_delete("foo").unwrap(); + } + + #[test] + fn create_remote_anonymous() { + let td = TempDir::new("test").unwrap(); + let repo = Repository::init(td.path()).unwrap(); + + let origin = repo.remote_anonymous("/path/to/nowhere").unwrap(); + assert_eq!(origin.name(), None); + drop(origin.clone()); + } + + #[test] + fn is_valid() { + assert!(Remote::is_valid_name("foobar")); + assert!(!Remote::is_valid_name("\x01")); + } + + #[test] + fn transfer_cb() { + let (td, _repo) = ::test::repo_init(); + let td2 = TempDir::new("git").unwrap(); + let url = 
::test::path2url(&td.path()); + + let repo = Repository::init(td2.path()).unwrap(); + let progress_hit = Cell::new(false); + { + let mut callbacks = RemoteCallbacks::new(); + let mut origin = repo.remote("origin", &url).unwrap(); + + callbacks.transfer_progress(|_progress| { + progress_hit.set(true); + true + }); + origin.fetch(&[], + Some(FetchOptions::new().remote_callbacks(callbacks)), + None).unwrap(); + + let list = t!(origin.list()); + assert_eq!(list.len(), 2); + assert_eq!(list[0].name(), "HEAD"); + assert!(!list[0].is_local()); + assert_eq!(list[1].name(), "refs/heads/master"); + assert!(!list[1].is_local()); + } + assert!(progress_hit.get()); + } + + #[test] + fn push() { + let (_td, repo) = ::test::repo_init(); + let td2 = TempDir::new("git1").unwrap(); + let td3 = TempDir::new("git2").unwrap(); + let url = ::test::path2url(&td2.path()); + + Repository::init_bare(td2.path()).unwrap(); + // git push + let mut remote = repo.remote("origin", &url).unwrap(); + let mut updated = false; + { + let mut callbacks = RemoteCallbacks::new(); + callbacks.push_update_reference(|refname, status| { + updated = true; + assert_eq!(refname, "refs/heads/master"); + assert_eq!(status, None); + Ok(()) + }); + let mut options = PushOptions::new(); + options.remote_callbacks(callbacks); + remote.push(&["refs/heads/master"], Some(&mut options)).unwrap(); + } + assert!(updated); + + let repo = Repository::clone(&url, td3.path()).unwrap(); + let commit = repo.head().unwrap().target().unwrap(); + let commit = repo.find_commit(commit).unwrap(); + assert_eq!(commit.message(), Some("initial")); + } +} diff -Nru cargo-0.17.0/vendor/git2-0.6.4/src/repo.rs cargo-0.19.0/vendor/git2-0.6.4/src/repo.rs --- cargo-0.17.0/vendor/git2-0.6.4/src/repo.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/git2-0.6.4/src/repo.rs 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,2257 @@ +use std::env; +use std::ffi::{CStr, CString, OsStr}; +use std::iter::IntoIterator; +use std::mem; +use 
std::path::Path; +use std::str; +use libc::{c_int, c_char, size_t, c_void, c_uint}; + +use {raw, Revspec, Error, init, Object, RepositoryOpenFlags, RepositoryState, Remote, Buf, StashFlags}; +use {ResetType, Signature, Reference, References, Submodule, Blame, BlameOptions}; +use {Branches, BranchType, Index, Config, Oid, Blob, Branch, Commit, Tree}; +use {AnnotatedCommit, MergeOptions, SubmoduleIgnore, SubmoduleStatus}; +use {ObjectType, Tag, Note, Notes, StatusOptions, Statuses, Status, Revwalk}; +use {RevparseMode, RepositoryInitMode, Reflog, IntoCString, Describe}; +use {DescribeOptions, TreeBuilder, Diff, DiffOptions, PackBuilder}; +use build::{RepoBuilder, CheckoutBuilder}; +use stash::{StashApplyOptions, StashCbData, stash_cb}; +use string_array::StringArray; +use oid_array::OidArray; +use util::{self, Binding}; + +/// An owned git repository, representing all state associated with the +/// underlying filesystem. +/// +/// This structure corresponds to a `git_repository` in libgit2. Many other +/// types in git2-rs are derivative from this structure and are attached to its +/// lifetime. +/// +/// When a repository goes out of scope it is freed in memory but not deleted +/// from the filesystem. +pub struct Repository { + raw: *mut raw::git_repository, +} + +// It is the current belief that a `Repository` can be sent among threads, or +// even shared among threads in a mutex. +unsafe impl Send for Repository {} + +/// Options which can be used to configure how a repository is initialized +pub struct RepositoryInitOptions { + flags: u32, + mode: u32, + workdir_path: Option, + description: Option, + template_path: Option, + initial_head: Option, + origin_url: Option, +} + +impl Repository { + /// Attempt to open an already-existing repository at `path`. + /// + /// The path can point to either a normal or bare repository. 
+ pub fn open>(path: P) -> Result { + init(); + let path = try!(path.as_ref().into_c_string()); + let mut ret = 0 as *mut raw::git_repository; + unsafe { + try_call!(raw::git_repository_open(&mut ret, path)); + Ok(Binding::from_raw(ret)) + } + } + + /// Find and open an existing repository, respecting git environment + /// variables. This acts like `open_ext` with the + /// `REPOSITORY_OPEN_FROM_ENV` flag, but additionally respects `$GIT_DIR`. + /// With `$GIT_DIR` unset, this will search for a repository starting in + /// the current directory. + pub fn open_from_env() -> Result { + init(); + let mut ret = 0 as *mut raw::git_repository; + let flags = raw::GIT_REPOSITORY_OPEN_FROM_ENV; + unsafe { + try_call!(raw::git_repository_open_ext(&mut ret, + 0 as *const _, + flags as c_uint, + 0 as *const _)); + Ok(Binding::from_raw(ret)) + } + } + + /// Find and open an existing repository, with additional options. + /// + /// If flags contains REPOSITORY_OPEN_NO_SEARCH, the path must point + /// directly to a repository; otherwise, this may point to a subdirectory + /// of a repository, and `open_ext` will search up through parent + /// directories. + /// + /// If flags contains REPOSITORY_OPEN_CROSS_FS, the search through parent + /// directories will not cross a filesystem boundary (detected when the + /// stat st_dev field changes). + /// + /// If flags contains REPOSITORY_OPEN_BARE, force opening the repository as + /// bare even if it isn't, ignoring any working directory, and defer + /// loading the repository configuration for performance. + /// + /// If flags contains REPOSITORY_OPEN_NO_DOTGIT, don't try appending + /// `/.git` to `path`. + /// + /// If flags contains REPOSITORY_OPEN_FROM_ENV, `open_ext` will ignore + /// other flags and `ceiling_dirs`, and respect the same environment + /// variables git does. Note, however, that `path` overrides `$GIT_DIR`; to + /// respect `$GIT_DIR` as well, use `open_from_env`. 
+ /// + /// ceiling_dirs specifies a list of paths that the search through parent + /// directories will stop before entering. Use the functions in std::env + /// to construct or manipulate such a path list. + pub fn open_ext(path: P, + flags: RepositoryOpenFlags, + ceiling_dirs: I) + -> Result + where P: AsRef, O: AsRef, I: IntoIterator + { + init(); + let path = try!(path.as_ref().into_c_string()); + let ceiling_dirs_os = try!(env::join_paths(ceiling_dirs)); + let ceiling_dirs = try!(ceiling_dirs_os.into_c_string()); + let mut ret = 0 as *mut raw::git_repository; + unsafe { + try_call!(raw::git_repository_open_ext(&mut ret, + path, + flags.bits() as c_uint, + ceiling_dirs)); + Ok(Binding::from_raw(ret)) + } + } + + /// Attempt to open an already-existing repository at or above `path` + /// + /// This starts at `path` and looks up the filesystem hierarchy + /// until it finds a repository. + pub fn discover>(path: P) -> Result { + // TODO: this diverges significantly from the libgit2 API + init(); + let buf = Buf::new(); + let path = try!(path.as_ref().into_c_string()); + unsafe { + try_call!(raw::git_repository_discover(buf.raw(), path, 1, + 0 as *const _)); + } + Repository::open(util::bytes2path(&*buf)) + } + + /// Creates a new repository in the specified folder. + /// + /// This by default will create any necessary directories to create the + /// repository, and it will read any user-specified templates when creating + /// the repository. This behavior can be configured through `init_opts`. + pub fn init>(path: P) -> Result { + Repository::init_opts(path, &RepositoryInitOptions::new()) + } + + /// Creates a new `--bare` repository in the specified folder. + /// + /// The folder must exist prior to invoking this function. + pub fn init_bare>(path: P) -> Result { + Repository::init_opts(path, RepositoryInitOptions::new().bare(true)) + } + + /// Creates a new `--bare` repository in the specified folder. 
+ /// + /// The folder must exist prior to invoking this function. + pub fn init_opts>(path: P, opts: &RepositoryInitOptions) + -> Result { + init(); + let path = try!(path.as_ref().into_c_string()); + let mut ret = 0 as *mut raw::git_repository; + unsafe { + let mut opts = opts.raw(); + try_call!(raw::git_repository_init_ext(&mut ret, path, &mut opts)); + Ok(Binding::from_raw(ret)) + } + } + + /// Clone a remote repository. + /// + /// See the `RepoBuilder` struct for more information. This function will + /// delegate to a fresh `RepoBuilder` + pub fn clone>(url: &str, into: P) + -> Result { + ::init(); + RepoBuilder::new().clone(url, into.as_ref()) + } + + /// Execute a rev-parse operation against the `spec` listed. + /// + /// The resulting revision specification is returned, or an error is + /// returned if one occurs. + pub fn revparse(&self, spec: &str) -> Result { + let mut raw = raw::git_revspec { + from: 0 as *mut _, + to: 0 as *mut _, + flags: 0, + }; + let spec = try!(CString::new(spec)); + unsafe { + try_call!(raw::git_revparse(&mut raw, self.raw, spec)); + let to = Binding::from_raw_opt(raw.to); + let from = Binding::from_raw_opt(raw.from); + let mode = RevparseMode::from_bits_truncate(raw.flags as u32); + Ok(Revspec::from_objects(from, to, mode)) + } + } + + /// Find a single object, as specified by a revision string. + pub fn revparse_single(&self, spec: &str) -> Result { + let spec = try!(CString::new(spec)); + let mut obj = 0 as *mut raw::git_object; + unsafe { + try_call!(raw::git_revparse_single(&mut obj, self.raw, spec)); + assert!(!obj.is_null()); + Ok(Binding::from_raw(obj)) + } + } + + /// Find a single object and intermediate reference by a revision string. + /// + /// See `man gitrevisions`, or + /// http://git-scm.com/docs/git-rev-parse.html#_specifying_revisions for + /// information on the syntax accepted. + /// + /// In some cases (`@{<-n>}` or `@{upstream}`), the expression + /// may point to an intermediate reference. 
When such expressions are being + /// passed in, this intermediate reference is returned. + pub fn revparse_ext(&self, spec: &str) + -> Result<(Object, Option), Error> { + let spec = try!(CString::new(spec)); + let mut git_obj = 0 as *mut raw::git_object; + let mut git_ref = 0 as *mut raw::git_reference; + unsafe { + try_call!(raw::git_revparse_ext(&mut git_obj, &mut git_ref, + self.raw, spec)); + assert!(!git_obj.is_null()); + Ok((Binding::from_raw(git_obj), Binding::from_raw_opt(git_ref))) + } + } + + /// Tests whether this repository is a bare repository or not. + pub fn is_bare(&self) -> bool { + unsafe { raw::git_repository_is_bare(self.raw) == 1 } + } + + /// Tests whether this repository is a shallow clone. + pub fn is_shallow(&self) -> bool { + unsafe { raw::git_repository_is_shallow(self.raw) == 1 } + } + + /// Tests whether this repository is empty. + pub fn is_empty(&self) -> Result { + let empty = unsafe { + try_call!(raw::git_repository_is_empty(self.raw)) + }; + Ok(empty == 1) + } + + /// Returns the path to the `.git` folder for normal repositories or the + /// repository itself for bare repositories. + pub fn path(&self) -> &Path { + unsafe { + let ptr = raw::git_repository_path(self.raw); + util::bytes2path(::opt_bytes(self, ptr).unwrap()) + } + } + + /// Returns the current state of this repository + pub fn state(&self) -> RepositoryState { + let state = unsafe { raw::git_repository_state(self.raw) }; + macro_rules! 
check( ($($raw:ident => $real:ident),*) => ( + $(if state == raw::$raw as c_int { + super::RepositoryState::$real + }) else * + else { + panic!("unknown repository state: {}", state) + } + ) ); + + check!( + GIT_REPOSITORY_STATE_NONE => Clean, + GIT_REPOSITORY_STATE_MERGE => Merge, + GIT_REPOSITORY_STATE_REVERT => Revert, + GIT_REPOSITORY_STATE_REVERT_SEQUENCE => RevertSequence, + GIT_REPOSITORY_STATE_CHERRYPICK => CherryPick, + GIT_REPOSITORY_STATE_CHERRYPICK_SEQUENCE => CherryPickSequence, + GIT_REPOSITORY_STATE_BISECT => Bisect, + GIT_REPOSITORY_STATE_REBASE => Rebase, + GIT_REPOSITORY_STATE_REBASE_INTERACTIVE => RebaseInteractive, + GIT_REPOSITORY_STATE_REBASE_MERGE => RebaseMerge, + GIT_REPOSITORY_STATE_APPLY_MAILBOX => ApplyMailbox, + GIT_REPOSITORY_STATE_APPLY_MAILBOX_OR_REBASE => ApplyMailboxOrRebase + ) + } + + /// Get the path of the working directory for this repository. + /// + /// If this repository is bare, then `None` is returned. + pub fn workdir(&self) -> Option<&Path> { + unsafe { + let ptr = raw::git_repository_workdir(self.raw); + if ptr.is_null() { + None + } else { + Some(util::bytes2path(CStr::from_ptr(ptr).to_bytes())) + } + } + } + + /// Set the path to the working directory for this repository. + /// + /// If `update_link` is true, create/update the gitlink file in the workdir + /// and set config "core.worktree" (if workdir is not the parent of the .git + /// directory). + pub fn set_workdir(&self, path: &Path, update_gitlink: bool) + -> Result<(), Error> { + let path = try!(path.into_c_string()); + unsafe { + try_call!(raw::git_repository_set_workdir(self.raw(), path, + update_gitlink)); + } + Ok(()) + } + + /// Get the currently active namespace for this repository. + /// + /// If there is no namespace, or the namespace is not a valid utf8 string, + /// `None` is returned. 
+ pub fn namespace(&self) -> Option<&str> { + self.namespace_bytes().and_then(|s| str::from_utf8(s).ok()) + } + + /// Get the currently active namespace for this repository as a byte array. + /// + /// If there is no namespace, `None` is returned. + pub fn namespace_bytes(&self) -> Option<&[u8]> { + unsafe { ::opt_bytes(self, raw::git_repository_get_namespace(self.raw)) } + } + + /// List all remotes for a given repository + pub fn remotes(&self) -> Result { + let mut arr = raw::git_strarray { + strings: 0 as *mut *mut c_char, + count: 0, + }; + unsafe { + try_call!(raw::git_remote_list(&mut arr, self.raw)); + Ok(Binding::from_raw(arr)) + } + } + + /// Get the information for a particular remote + pub fn find_remote(&self, name: &str) -> Result { + let mut ret = 0 as *mut raw::git_remote; + let name = try!(CString::new(name)); + unsafe { + try_call!(raw::git_remote_lookup(&mut ret, self.raw, name)); + Ok(Binding::from_raw(ret)) + } + } + + /// Add a remote with the default fetch refspec to the repository's + /// configuration. + pub fn remote(&self, name: &str, url: &str) -> Result { + let mut ret = 0 as *mut raw::git_remote; + let name = try!(CString::new(name)); + let url = try!(CString::new(url)); + unsafe { + try_call!(raw::git_remote_create(&mut ret, self.raw, name, url)); + Ok(Binding::from_raw(ret)) + } + } + + /// Create an anonymous remote + /// + /// Create a remote with the given url and refspec in memory. You can use + /// this when you have a URL instead of a remote's name. Note that anonymous + /// remotes cannot be converted to persisted remotes. + pub fn remote_anonymous(&self, url: &str) -> Result { + let mut ret = 0 as *mut raw::git_remote; + let url = try!(CString::new(url)); + unsafe { + try_call!(raw::git_remote_create_anonymous(&mut ret, self.raw, url)); + Ok(Binding::from_raw(ret)) + } + } + + /// Give a remote a new name + /// + /// All remote-tracking branches and configuration settings for the remote + /// are updated. 
+ /// + /// A temporary in-memory remote cannot be given a name with this method. + /// + /// No loaded instances of the remote with the old name will change their + /// name or their list of refspecs. + /// + /// The returned array of strings is a list of the non-default refspecs + /// which cannot be renamed and are returned for further processing by the + /// caller. + pub fn remote_rename(&self, name: &str, + new_name: &str) -> Result { + let name = try!(CString::new(name)); + let new_name = try!(CString::new(new_name)); + let mut problems = raw::git_strarray { + count: 0, + strings: 0 as *mut *mut c_char, + }; + unsafe { + try_call!(raw::git_remote_rename(&mut problems, self.raw, name, + new_name)); + Ok(Binding::from_raw(problems)) + } + } + + /// Delete an existing persisted remote. + /// + /// All remote-tracking branches and configuration settings for the remote + /// will be removed. + pub fn remote_delete(&self, name: &str) -> Result<(), Error> { + let name = try!(CString::new(name)); + unsafe { try_call!(raw::git_remote_delete(self.raw, name)); } + Ok(()) + } + + /// Add a fetch refspec to the remote's configuration + /// + /// Add the given refspec to the fetch list in the configuration. No loaded + /// remote instances will be affected. + pub fn remote_add_fetch(&self, name: &str, spec: &str) + -> Result<(), Error> { + let name = try!(CString::new(name)); + let spec = try!(CString::new(spec)); + unsafe { + try_call!(raw::git_remote_add_fetch(self.raw, name, spec)); + } + Ok(()) + } + + /// Add a push refspec to the remote's configuration. + /// + /// Add the given refspec to the push list in the configuration. No + /// loaded remote instances will be affected. 
+ pub fn remote_add_push(&self, name: &str, spec: &str) + -> Result<(), Error> { + let name = try!(CString::new(name)); + let spec = try!(CString::new(spec)); + unsafe { + try_call!(raw::git_remote_add_push(self.raw, name, spec)); + } + Ok(()) + } + + /// Set the remote's url in the configuration + /// + /// Remote objects already in memory will not be affected. This assumes + /// the common case of a single-url remote and will otherwise return an + /// error. + pub fn remote_set_url(&self, name: &str, url: &str) -> Result<(), Error> { + let name = try!(CString::new(name)); + let url = try!(CString::new(url)); + unsafe { try_call!(raw::git_remote_set_url(self.raw, name, url)); } + Ok(()) + } + + /// Set the remote's url for pushing in the configuration. + /// + /// Remote objects already in memory will not be affected. This assumes + /// the common case of a single-url remote and will otherwise return an + /// error. + /// + /// `None` indicates that it should be cleared. + pub fn remote_set_pushurl(&self, name: &str, pushurl: Option<&str>) + -> Result<(), Error> { + let name = try!(CString::new(name)); + let pushurl = try!(::opt_cstr(pushurl)); + unsafe { + try_call!(raw::git_remote_set_pushurl(self.raw, name, pushurl)); + } + Ok(()) + } + + /// Sets the current head to the specified object and optionally resets + /// the index and working tree to match. + /// + /// A soft reset means the head will be moved to the commit. + /// + /// A mixed reset will trigger a soft reset, plus the index will be + /// replaced with the content of the commit tree. + /// + /// A hard reset will trigger a mixed reset and the working directory will + /// be replaced with the content of the index. (Untracked and ignored files + /// will be left alone, however.) + /// + /// The `target` is a commit-ish to which the head should be moved to. The + /// object can either be a commit or a tag, but tags must be dereferenceable + /// to a commit. 
+ /// + /// The `checkout` options will only be used for a hard reset. + pub fn reset(&self, + target: &Object, + kind: ResetType, + checkout: Option<&mut CheckoutBuilder>) + -> Result<(), Error> { + unsafe { + let mut opts: raw::git_checkout_options = mem::zeroed(); + try_call!(raw::git_checkout_init_options(&mut opts, + raw::GIT_CHECKOUT_OPTIONS_VERSION)); + let opts = checkout.map(|c| { + c.configure(&mut opts); &mut opts + }); + try_call!(raw::git_reset(self.raw, target.raw(), kind, opts)); + } + Ok(()) + } + + /// Updates some entries in the index from the target commit tree. + /// + /// The scope of the updated entries is determined by the paths being + /// in the iterator provided. + /// + /// Passing a `None` target will result in removing entries in the index + /// matching the provided pathspecs. + pub fn reset_default(&self, + target: Option<&Object>, + paths: I) -> Result<(), Error> + where T: IntoCString, I: IntoIterator, + { + let (_a, _b, mut arr) = try!(::util::iter2cstrs(paths)); + let target = target.map(|t| t.raw()); + unsafe { + try_call!(raw::git_reset_default(self.raw, target, &mut arr)); + } + Ok(()) + } + + /// Retrieve and resolve the reference pointed at by HEAD. + pub fn head(&self) -> Result { + let mut ret = 0 as *mut raw::git_reference; + unsafe { + try_call!(raw::git_repository_head(&mut ret, self.raw)); + Ok(Binding::from_raw(ret)) + } + } + + /// Make the repository HEAD point to the specified reference. + /// + /// If the provided reference points to a tree or a blob, the HEAD is + /// unaltered and an error is returned. + /// + /// If the provided reference points to a branch, the HEAD will point to + /// that branch, staying attached, or become attached if it isn't yet. If + /// the branch doesn't exist yet, no error will be returned. The HEAD will + /// then be attached to an unborn branch. + /// + /// Otherwise, the HEAD will be detached and will directly point to the + /// commit. 
+ pub fn set_head(&self, refname: &str) -> Result<(), Error> { + let refname = try!(CString::new(refname)); + unsafe { + try_call!(raw::git_repository_set_head(self.raw, refname)); + } + Ok(()) + } + + /// Make the repository HEAD directly point to the commit. + /// + /// If the provided committish cannot be found in the repository, the HEAD + /// is unaltered and an error is returned. + /// + /// If the provided commitish cannot be peeled into a commit, the HEAD is + /// unaltered and an error is returned. + /// + /// Otherwise, the HEAD will eventually be detached and will directly point + /// to the peeled commit. + pub fn set_head_detached(&self, commitish: Oid) -> Result<(), Error> { + unsafe { + try_call!(raw::git_repository_set_head_detached(self.raw, + commitish.raw())); + } + Ok(()) + } + + /// Create an iterator for the repo's references + pub fn references(&self) -> Result { + let mut ret = 0 as *mut raw::git_reference_iterator; + unsafe { + try_call!(raw::git_reference_iterator_new(&mut ret, self.raw)); + Ok(Binding::from_raw(ret)) + } + } + + /// Create an iterator for the repo's references that match the specified + /// glob + pub fn references_glob(&self, glob: &str) -> Result { + let mut ret = 0 as *mut raw::git_reference_iterator; + let glob = try!(CString::new(glob)); + unsafe { + try_call!(raw::git_reference_iterator_glob_new(&mut ret, self.raw, + glob)); + + Ok(Binding::from_raw(ret)) + } + } + + /// Load all submodules for this repository and return them. 
+ pub fn submodules(&self) -> Result, Error> { + struct Data<'a, 'b:'a> { + repo: &'b Repository, + ret: &'a mut Vec>, + } + let mut ret = Vec::new(); + + unsafe { + let mut data = Data { + repo: self, + ret: &mut ret, + }; + try_call!(raw::git_submodule_foreach(self.raw, append, + &mut data as *mut _ + as *mut c_void)); + } + + return Ok(ret); + + extern fn append(_repo: *mut raw::git_submodule, + name: *const c_char, + data: *mut c_void) -> c_int { + unsafe { + let data = &mut *(data as *mut Data); + let mut raw = 0 as *mut raw::git_submodule; + let rc = raw::git_submodule_lookup(&mut raw, data.repo.raw(), + name); + assert_eq!(rc, 0); + data.ret.push(Binding::from_raw(raw)); + } + 0 + } + } + + /// Gather file status information and populate the returned structure. + /// + /// Note that if a pathspec is given in the options to filter the + /// status, then the results from rename detection (if you enable it) may + /// not be accurate. To do rename detection properly, this must be called + /// with no pathspec so that all files can be considered. + pub fn statuses(&self, options: Option<&mut StatusOptions>) + -> Result { + let mut ret = 0 as *mut raw::git_status_list; + unsafe { + try_call!(raw::git_status_list_new(&mut ret, self.raw, + options.map(|s| s.raw()) + .unwrap_or(0 as *const _))); + Ok(Binding::from_raw(ret)) + } + } + + /// Test if the ignore rules apply to a given file. + /// + /// This function checks the ignore rules to see if they would apply to the + /// given file. This indicates if the file would be ignored regardless of + /// whether the file is already in the index or committed to the repository. + /// + /// One way to think of this is if you were to do "git add ." on the + /// directory containing the file, would it be added or not? 
+ pub fn status_should_ignore(&self, path: &Path) -> Result { + let mut ret = 0 as c_int; + let path = try!(path.into_c_string()); + unsafe { + try_call!(raw::git_status_should_ignore(&mut ret, self.raw, + path)); + } + Ok(ret != 0) + } + + /// Get file status for a single file. + /// + /// This tries to get status for the filename that you give. If no files + /// match that name (in either the HEAD, index, or working directory), this + /// returns NotFound. + /// + /// If the name matches multiple files (for example, if the path names a + /// directory or if running on a case- insensitive filesystem and yet the + /// HEAD has two entries that both match the path), then this returns + /// Ambiguous because it cannot give correct results. + /// + /// This does not do any sort of rename detection. Renames require a set of + /// targets and because of the path filtering, there is not enough + /// information to check renames correctly. To check file status with rename + /// detection, there is no choice but to do a full `statuses` and scan + /// through looking for the path that you are interested in. + pub fn status_file(&self, path: &Path) -> Result { + let mut ret = 0 as c_uint; + let path = try!(path.into_c_string()); + unsafe { + try_call!(raw::git_status_file(&mut ret, self.raw, + path)); + } + Ok(Status::from_bits_truncate(ret as u32)) + } + + /// Create an iterator which loops over the requested branches. + pub fn branches(&self, filter: Option) + -> Result { + let mut raw = 0 as *mut raw::git_branch_iterator; + unsafe { + try_call!(raw::git_branch_iterator_new(&mut raw, self.raw(), filter)); + Ok(Branches::from_raw(raw)) + } + } + + /// Get the Index file for this repository. + /// + /// If a custom index has not been set, the default index for the repository + /// will be returned (the one located in .git/index). 
+ pub fn index(&self) -> Result { + let mut raw = 0 as *mut raw::git_index; + unsafe { + try_call!(raw::git_repository_index(&mut raw, self.raw())); + Ok(Binding::from_raw(raw)) + } + } + + /// Set the Index file for this repository. + pub fn set_index(&self, index: &mut Index) { + unsafe { + raw::git_repository_set_index(self.raw(), index.raw()); + } + } + + /// Get the configuration file for this repository. + /// + /// If a configuration file has not been set, the default config set for the + /// repository will be returned, including global and system configurations + /// (if they are available). + pub fn config(&self) -> Result { + let mut raw = 0 as *mut raw::git_config; + unsafe { + try_call!(raw::git_repository_config(&mut raw, self.raw())); + Ok(Binding::from_raw(raw)) + } + } + + /// Write an in-memory buffer to the ODB as a blob. + /// + /// The Oid returned can in turn be passed to `find_blob` to get a handle to + /// the blob. + pub fn blob(&self, data: &[u8]) -> Result { + let mut raw = raw::git_oid { id: [0; raw::GIT_OID_RAWSZ] }; + unsafe { + let ptr = data.as_ptr() as *const c_void; + let len = data.len() as size_t; + try_call!(raw::git_blob_create_frombuffer(&mut raw, self.raw(), + ptr, len)); + Ok(Binding::from_raw(&raw as *const _)) + } + } + + /// Read a file from the filesystem and write its content to the Object + /// Database as a loose blob + /// + /// The Oid returned can in turn be passed to `find_blob` to get a handle to + /// the blob. + pub fn blob_path(&self, path: &Path) -> Result { + let path = try!(path.into_c_string()); + let mut raw = raw::git_oid { id: [0; raw::GIT_OID_RAWSZ] }; + unsafe { + try_call!(raw::git_blob_create_fromdisk(&mut raw, self.raw(), + path)); + Ok(Binding::from_raw(&raw as *const _)) + } + } + + /// Lookup a reference to one of the objects in a repository. 
+ pub fn find_blob(&self, oid: Oid) -> Result { + let mut raw = 0 as *mut raw::git_blob; + unsafe { + try_call!(raw::git_blob_lookup(&mut raw, self.raw(), oid.raw())); + Ok(Binding::from_raw(raw)) + } + } + + /// Create a new branch pointing at a target commit + /// + /// A new direct reference will be created pointing to this target commit. + /// If `force` is true and a reference already exists with the given name, + /// it'll be replaced. + pub fn branch(&self, + branch_name: &str, + target: &Commit, + force: bool) -> Result { + let branch_name = try!(CString::new(branch_name)); + let mut raw = 0 as *mut raw::git_reference; + unsafe { + try_call!(raw::git_branch_create(&mut raw, + self.raw(), + branch_name, + target.raw(), + force)); + Ok(Branch::wrap(Binding::from_raw(raw))) + } + } + + /// Lookup a branch by its name in a repository. + pub fn find_branch(&self, name: &str, branch_type: BranchType) + -> Result { + let name = try!(CString::new(name)); + let mut ret = 0 as *mut raw::git_reference; + unsafe { + try_call!(raw::git_branch_lookup(&mut ret, self.raw(), name, + branch_type)); + Ok(Branch::wrap(Binding::from_raw(ret))) + } + } + + /// Create new commit in the repository + /// + /// If the `update_ref` is not `None`, name of the reference that will be + /// updated to point to this commit. If the reference is not direct, it will + /// be resolved to a direct reference. Use "HEAD" to update the HEAD of the + /// current branch and make it point to this commit. If the reference + /// doesn't exist yet, it will be created. If it does exist, the first + /// parent must be the tip of this branch. 
+ pub fn commit(&self, + update_ref: Option<&str>, + author: &Signature, + committer: &Signature, + message: &str, + tree: &Tree, + parents: &[&Commit]) -> Result { + let update_ref = try!(::opt_cstr(update_ref)); + let mut parent_ptrs = parents.iter().map(|p| { + p.raw() as *const raw::git_commit + }).collect::>(); + let message = try!(CString::new(message)); + let mut raw = raw::git_oid { id: [0; raw::GIT_OID_RAWSZ] }; + unsafe { + try_call!(raw::git_commit_create(&mut raw, + self.raw(), + update_ref, + author.raw(), + committer.raw(), + 0 as *const c_char, + message, + tree.raw(), + parents.len() as size_t, + parent_ptrs.as_mut_ptr())); + Ok(Binding::from_raw(&raw as *const _)) + } + } + + + /// Lookup a reference to one of the commits in a repository. + pub fn find_commit(&self, oid: Oid) -> Result { + let mut raw = 0 as *mut raw::git_commit; + unsafe { + try_call!(raw::git_commit_lookup(&mut raw, self.raw(), oid.raw())); + Ok(Binding::from_raw(raw)) + } + } + + /// Lookup a reference to one of the objects in a repository. + pub fn find_object(&self, oid: Oid, + kind: Option) -> Result { + let mut raw = 0 as *mut raw::git_object; + unsafe { + try_call!(raw::git_object_lookup(&mut raw, self.raw(), oid.raw(), + kind)); + Ok(Binding::from_raw(raw)) + } + } + + /// Create a new direct reference. + /// + /// This function will return an error if a reference already exists with + /// the given name unless force is true, in which case it will be + /// overwritten. + pub fn reference(&self, name: &str, id: Oid, force: bool, + log_message: &str) -> Result { + let name = try!(CString::new(name)); + let log_message = try!(CString::new(log_message)); + let mut raw = 0 as *mut raw::git_reference; + unsafe { + try_call!(raw::git_reference_create(&mut raw, self.raw(), name, + id.raw(), force, + log_message)); + Ok(Binding::from_raw(raw)) + } + } + + /// Conditionally create new direct reference. 
+ /// + /// A direct reference (also called an object id reference) refers directly + /// to a specific object id (a.k.a. OID or SHA) in the repository. The id + /// permanently refers to the object (although the reference itself can be + /// moved). For example, in libgit2 the direct ref "refs/tags/v0.17.0" + /// refers to OID 5b9fac39d8a76b9139667c26a63e6b3f204b3977. + /// + /// The direct reference will be created in the repository and written to + /// the disk. + /// + /// Valid reference names must follow one of two patterns: + /// + /// 1. Top-level names must contain only capital letters and underscores, + /// and must begin and end with a letter. (e.g. "HEAD", "ORIG_HEAD"). + /// 2. Names prefixed with "refs/" can be almost anything. You must avoid + /// the characters `~`, `^`, `:`, `\\`, `?`, `[`, and `*`, and the + /// sequences ".." and "@{" which have special meaning to revparse. + /// + /// This function will return an error if a reference already exists with + /// the given name unless `force` is true, in which case it will be + /// overwritten. + /// + /// The message for the reflog will be ignored if the reference does not + /// belong in the standard set (HEAD, branches and remote-tracking + /// branches) and it does not have a reflog. + /// + /// It will return GIT_EMODIFIED if the reference's value at the time of + /// updating does not match the one passed through `current_id` (i.e. if the + /// ref has changed since the user read it). + pub fn reference_matching(&self, + name: &str, + id: Oid, + force: bool, + current_id: Oid, + log_message: &str) -> Result { + let name = try!(CString::new(name)); + let log_message = try!(CString::new(log_message)); + let mut raw = 0 as *mut raw::git_reference; + unsafe { + try_call!(raw::git_reference_create_matching(&mut raw, + self.raw(), + name, + id.raw(), + force, + current_id.raw(), + log_message)); + Ok(Binding::from_raw(raw)) + } + } + + /// Create a new symbolic reference. 
+ /// + /// This function will return an error if a reference already exists with + /// the given name unless force is true, in which case it will be + /// overwritten. + pub fn reference_symbolic(&self, name: &str, target: &str, + force: bool, + log_message: &str) + -> Result { + let name = try!(CString::new(name)); + let target = try!(CString::new(target)); + let log_message = try!(CString::new(log_message)); + let mut raw = 0 as *mut raw::git_reference; + unsafe { + try_call!(raw::git_reference_symbolic_create(&mut raw, self.raw(), + name, target, force, + log_message)); + Ok(Binding::from_raw(raw)) + } + } + + /// Create a new symbolic reference. + /// + /// This function will return an error if a reference already exists with + /// the given name unless force is true, in which case it will be + /// overwritten. + /// + /// It will return GIT_EMODIFIED if the reference's value at the time of + /// updating does not match the one passed through current_value (i.e. if + /// the ref has changed since the user read it). + pub fn reference_symbolic_matching(&self, + name: &str, + target: &str, + force: bool, + current_value: &str, + log_message: &str) + -> Result { + let name = try!(CString::new(name)); + let target = try!(CString::new(target)); + let current_value = try!(CString::new(current_value)); + let log_message = try!(CString::new(log_message)); + let mut raw = 0 as *mut raw::git_reference; + unsafe { + try_call!(raw::git_reference_symbolic_create_matching(&mut raw, + self.raw(), + name, + target, + force, + current_value, + log_message)); + Ok(Binding::from_raw(raw)) + } + } + + /// Lookup a reference to one of the objects in a repository. + pub fn find_reference(&self, name: &str) -> Result { + let name = try!(CString::new(name)); + let mut raw = 0 as *mut raw::git_reference; + unsafe { + try_call!(raw::git_reference_lookup(&mut raw, self.raw(), name)); + Ok(Binding::from_raw(raw)) + } + } + + /// Lookup a reference by name and resolve immediately to OID. 
+ /// + /// This function provides a quick way to resolve a reference name straight + /// through to the object id that it refers to. This avoids having to + /// allocate or free any `Reference` objects for simple situations. + pub fn refname_to_id(&self, name: &str) -> Result { + let name = try!(CString::new(name)); + let mut ret = raw::git_oid { id: [0; raw::GIT_OID_RAWSZ] }; + unsafe { + try_call!(raw::git_reference_name_to_id(&mut ret, self.raw(), name)); + Ok(Binding::from_raw(&ret as *const _)) + } + } + + /// Creates a git_annotated_commit from the given reference. + pub fn reference_to_annotated_commit(&self, reference: &Reference) + -> Result { + let mut ret = 0 as *mut raw::git_annotated_commit; + unsafe { + try_call!(raw::git_annotated_commit_from_ref(&mut ret, + self.raw(), + reference.raw())); + Ok(AnnotatedCommit::from_raw(ret)) + } + } + + /// Create a new action signature with default user and now timestamp. + /// + /// This looks up the user.name and user.email from the configuration and + /// uses the current time as the timestamp, and creates a new signature + /// based on that information. It will return `NotFound` if either the + /// user.name or user.email are not set. + pub fn signature(&self) -> Result, Error> { + let mut ret = 0 as *mut raw::git_signature; + unsafe { + try_call!(raw::git_signature_default(&mut ret, self.raw())); + Ok(Binding::from_raw(ret)) + } + } + + /// Set up a new git submodule for checkout. + /// + /// This does "git submodule add" up to the fetch and checkout of the + /// submodule contents. It preps a new submodule, creates an entry in + /// `.gitmodules` and creates an empty initialized repository either at the + /// given path in the working directory or in `.git/modules` with a gitlink + /// from the working directory to the new repo. + /// + /// To fully emulate "git submodule add" call this function, then `open()` + /// the submodule repo and perform the clone step as needed. 
Lastly, call + /// `finalize()` to wrap up adding the new submodule and `.gitmodules` to + /// the index to be ready to commit. + pub fn submodule(&self, url: &str, path: &Path, + use_gitlink: bool) -> Result { + let url = try!(CString::new(url)); + let path = try!(path.into_c_string()); + let mut raw = 0 as *mut raw::git_submodule; + unsafe { + try_call!(raw::git_submodule_add_setup(&mut raw, self.raw(), + url, path, use_gitlink)); + Ok(Binding::from_raw(raw)) + } + } + + /// Lookup submodule information by name or path. + /// + /// Given either the submodule name or path (they are usually the same), + /// this returns a structure describing the submodule. + pub fn find_submodule(&self, name: &str) -> Result { + let name = try!(CString::new(name)); + let mut raw = 0 as *mut raw::git_submodule; + unsafe { + try_call!(raw::git_submodule_lookup(&mut raw, self.raw(), name)); + Ok(Binding::from_raw(raw)) + } + } + + /// Get the status for a submodule. + /// + /// This looks at a submodule and tries to determine the status. It + /// will return a combination of the `SubmoduleStatus` values. + pub fn submodule_status(&self, name: &str, ignore: SubmoduleIgnore) + -> Result { + let mut ret = 0; + let name = try!(CString::new(name)); + unsafe { + try_call!(raw::git_submodule_status(&mut ret, self.raw, name, + ignore)); + } + Ok(SubmoduleStatus::from_bits_truncate(ret as u32)) + } + + /// Lookup a reference to one of the objects in a repository. + pub fn find_tree(&self, oid: Oid) -> Result { + let mut raw = 0 as *mut raw::git_tree; + unsafe { + try_call!(raw::git_tree_lookup(&mut raw, self.raw(), oid.raw())); + Ok(Binding::from_raw(raw)) + } + } + + /// Create a new TreeBuilder, optionally initialized with the + /// entries of the given Tree. + /// + /// The tree builder can be used to create or modify trees in memory and + /// write them as tree objects to the database. 
+ pub fn treebuilder(&self, tree: Option<&Tree>) -> Result { + unsafe { + let mut ret = 0 as *mut raw::git_treebuilder; + let tree = match tree { + Some(tree) => tree.raw(), + None => 0 as *mut raw::git_tree, + }; + try_call!(raw::git_treebuilder_new(&mut ret, self.raw, tree)); + Ok(Binding::from_raw(ret)) + } + } + + + /// Create a new tag in the repository from an object + /// + /// A new reference will also be created pointing to this tag object. If + /// `force` is true and a reference already exists with the given name, + /// it'll be replaced. + /// + /// The message will not be cleaned up. + /// + /// The tag name will be checked for validity. You must avoid the characters + /// '~', '^', ':', ' \ ', '?', '[', and '*', and the sequences ".." and " @ + /// {" which have special meaning to revparse. + pub fn tag(&self, name: &str, target: &Object, + tagger: &Signature, message: &str, + force: bool) -> Result { + let name = try!(CString::new(name)); + let message = try!(CString::new(message)); + let mut raw = raw::git_oid { id: [0; raw::GIT_OID_RAWSZ] }; + unsafe { + try_call!(raw::git_tag_create(&mut raw, self.raw, name, + target.raw(), tagger.raw(), + message, force)); + Ok(Binding::from_raw(&raw as *const _)) + } + } + + /// Create a new lightweight tag pointing at a target object + /// + /// A new direct reference will be created pointing to this target object. + /// If force is true and a reference already exists with the given name, + /// it'll be replaced. + pub fn tag_lightweight(&self, + name: &str, + target: &Object, + force: bool) -> Result { + let name = try!(CString::new(name)); + let mut raw = raw::git_oid { id: [0; raw::GIT_OID_RAWSZ] }; + unsafe { + try_call!(raw::git_tag_create_lightweight(&mut raw, self.raw, name, + target.raw(), force)); + Ok(Binding::from_raw(&raw as *const _)) + } + } + + /// Lookup a tag object from the repository. 
+ pub fn find_tag(&self, id: Oid) -> Result { + let mut raw = 0 as *mut raw::git_tag; + unsafe { + try_call!(raw::git_tag_lookup(&mut raw, self.raw, id.raw())); + Ok(Binding::from_raw(raw)) + } + } + + /// Delete an existing tag reference. + /// + /// The tag name will be checked for validity, see `tag` for some rules + /// about valid names. + pub fn tag_delete(&self, name: &str) -> Result<(), Error> { + let name = try!(CString::new(name)); + unsafe { + try_call!(raw::git_tag_delete(self.raw, name)); + Ok(()) + } + } + + /// Get a list with all the tags in the repository. + /// + /// An optional fnmatch pattern can also be specified. + pub fn tag_names(&self, pattern: Option<&str>) -> Result { + let mut arr = raw::git_strarray { + strings: 0 as *mut *mut c_char, + count: 0, + }; + unsafe { + match pattern { + Some(s) => { + let s = try!(CString::new(s)); + try_call!(raw::git_tag_list_match(&mut arr, s, self.raw)); + } + None => { try_call!(raw::git_tag_list(&mut arr, self.raw)); } + } + Ok(Binding::from_raw(arr)) + } + } + + /// Updates files in the index and the working tree to match the content of + /// the commit pointed at by HEAD. + pub fn checkout_head(&self, opts: Option<&mut CheckoutBuilder>) + -> Result<(), Error> { + unsafe { + let mut raw_opts = mem::zeroed(); + try_call!(raw::git_checkout_init_options(&mut raw_opts, + raw::GIT_CHECKOUT_OPTIONS_VERSION)); + if let Some(c) = opts { + c.configure(&mut raw_opts); + } + + try_call!(raw::git_checkout_head(self.raw, &raw_opts)); + } + Ok(()) + } + + /// Updates files in the working tree to match the content of the index. + /// + /// If the index is `None`, the repository's index will be used. 
+ pub fn checkout_index(&self, + index: Option<&mut Index>, + opts: Option<&mut CheckoutBuilder>) -> Result<(), Error> { + unsafe { + let mut raw_opts = mem::zeroed(); + try_call!(raw::git_checkout_init_options(&mut raw_opts, + raw::GIT_CHECKOUT_OPTIONS_VERSION)); + match opts { + Some(c) => c.configure(&mut raw_opts), + None => {} + } + + try_call!(raw::git_checkout_index(self.raw, + index.map(|i| &mut *i.raw()), + &raw_opts)); + } + Ok(()) + } + + /// Updates files in the index and working tree to match the content of the + /// tree pointed at by the treeish. + pub fn checkout_tree(&self, + treeish: &Object, + opts: Option<&mut CheckoutBuilder>) -> Result<(), Error> { + unsafe { + let mut raw_opts = mem::zeroed(); + try_call!(raw::git_checkout_init_options(&mut raw_opts, + raw::GIT_CHECKOUT_OPTIONS_VERSION)); + match opts { + Some(c) => c.configure(&mut raw_opts), + None => {} + } + + try_call!(raw::git_checkout_tree(self.raw, &*treeish.raw(), + &raw_opts)); + } + Ok(()) + } + + /// Merges the given commit(s) into HEAD, writing the results into the + /// working directory. Any changes are staged for commit and any conflicts + /// are written to the index. Callers should inspect the repository's index + /// after this completes, resolve any conflicts and prepare a commit. + /// + /// For compatibility with git, the repository is put into a merging state. + /// Once the commit is done (or if the uses wishes to abort), you should + /// clear this state by calling git_repository_state_cleanup(). 
+ pub fn merge(&self, + annotated_commits: &[&AnnotatedCommit], + merge_opts: Option<&mut MergeOptions>, + checkout_opts: Option<&mut CheckoutBuilder>) + -> Result<(), Error> + { + unsafe { + let mut raw_checkout_opts = mem::zeroed(); + try_call!(raw::git_checkout_init_options(&mut raw_checkout_opts, + raw::GIT_CHECKOUT_OPTIONS_VERSION)); + if let Some(c) = checkout_opts { + c.configure(&mut raw_checkout_opts); + } + + let mut commit_ptrs = annotated_commits.iter().map(|c| { + c.raw() as *const raw::git_annotated_commit + }).collect::>(); + + try_call!(raw::git_merge(self.raw, + commit_ptrs.as_mut_ptr(), + annotated_commits.len() as size_t, + merge_opts.map(|o| o.raw()) + .unwrap_or(0 as *const _), + &raw_checkout_opts)); + } + Ok(()) + } + + /// Merge two commits, producing an index that reflects the result of + /// the merge. The index may be written as-is to the working directory or + /// checked out. If the index is to be converted to a tree, the caller + /// should resolve any conflicts that arose as part of the merge. + pub fn merge_commits(&self, our_commit: &Commit, their_commit: &Commit, + opts: Option<&MergeOptions>) -> Result { + let mut raw = 0 as *mut raw::git_index; + unsafe { + try_call!(raw::git_merge_commits(&mut raw, self.raw, + our_commit.raw(), + their_commit.raw(), + opts.map(|o| o.raw()))); + Ok(Binding::from_raw(raw)) + } + } + + /// Remove all the metadata associated with an ongoing command like merge, + /// revert, cherry-pick, etc. For example: MERGE_HEAD, MERGE_MSG, etc. + pub fn cleanup_state(&self) -> Result<(), Error> { + unsafe { + try_call!(raw::git_repository_state_cleanup(self.raw)); + } + Ok(()) + } + + /// Add a note for an object + /// + /// The `notes_ref` argument is the canonical name of the reference to use, + /// defaulting to "refs/notes/commits". If `force` is specified then + /// previous notes are overwritten. 
+ pub fn note(&self, + author: &Signature, + committer: &Signature, + notes_ref: Option<&str>, + oid: Oid, + note: &str, + force: bool) -> Result { + let notes_ref = try!(::opt_cstr(notes_ref)); + let note = try!(CString::new(note)); + let mut ret = raw::git_oid { id: [0; raw::GIT_OID_RAWSZ] }; + unsafe { + try_call!(raw::git_note_create(&mut ret, + self.raw, + notes_ref, + author.raw(), + committer.raw(), + oid.raw(), + note, + force)); + Ok(Binding::from_raw(&ret as *const _)) + } + } + + /// Get the default notes reference for this repository + pub fn note_default_ref(&self) -> Result { + let ret = Buf::new(); + unsafe { + try_call!(raw::git_note_default_ref(ret.raw(), self.raw)); + } + Ok(str::from_utf8(&ret).unwrap().to_string()) + } + + /// Creates a new iterator for notes in this repository. + /// + /// The `notes_ref` argument is the canonical name of the reference to use, + /// defaulting to "refs/notes/commits". + /// + /// The iterator returned yields pairs of (Oid, Oid) where the first element + /// is the id of the note and the second id is the id the note is + /// annotating. + pub fn notes(&self, notes_ref: Option<&str>) -> Result { + let notes_ref = try!(::opt_cstr(notes_ref)); + let mut ret = 0 as *mut raw::git_note_iterator; + unsafe { + try_call!(raw::git_note_iterator_new(&mut ret, self.raw, notes_ref)); + Ok(Binding::from_raw(ret)) + } + } + + /// Read the note for an object. + /// + /// The `notes_ref` argument is the canonical name of the reference to use, + /// defaulting to "refs/notes/commits". + /// + /// The id specified is the Oid of the git object to read the note from. + pub fn find_note(&self, notes_ref: Option<&str>, id: Oid) + -> Result { + let notes_ref = try!(::opt_cstr(notes_ref)); + let mut ret = 0 as *mut raw::git_note; + unsafe { + try_call!(raw::git_note_read(&mut ret, self.raw, notes_ref, + id.raw())); + Ok(Binding::from_raw(ret)) + } + } + + /// Remove the note for an object. 
+ /// + /// The `notes_ref` argument is the canonical name of the reference to use, + /// defaulting to "refs/notes/commits". + /// + /// The id specified is the Oid of the git object to remove the note from. + pub fn note_delete(&self, + id: Oid, + notes_ref: Option<&str>, + author: &Signature, + committer: &Signature) -> Result<(), Error> { + let notes_ref = try!(::opt_cstr(notes_ref)); + unsafe { + try_call!(raw::git_note_remove(self.raw, notes_ref, author.raw(), + committer.raw(), id.raw())); + Ok(()) + } + } + + /// Create a revwalk that can be used to traverse the commit graph. + pub fn revwalk(&self) -> Result { + let mut raw = 0 as *mut raw::git_revwalk; + unsafe { + try_call!(raw::git_revwalk_new(&mut raw, self.raw())); + Ok(Binding::from_raw(raw)) + } + } + + /// Get the blame for a single file. + pub fn blame_file(&self, path: &Path, opts: Option<&mut BlameOptions>) + -> Result { + let path = try!(path.into_c_string()); + let mut raw = 0 as *mut raw::git_blame; + + unsafe { + try_call!(raw::git_blame_file(&mut raw, + self.raw(), + path, + opts.map(|s| s.raw()))); + Ok(Binding::from_raw(raw)) + } + } + + /// Find a merge base between two commits + pub fn merge_base(&self, one: Oid, two: Oid) -> Result { + let mut raw = raw::git_oid { id: [0; raw::GIT_OID_RAWSZ] }; + unsafe { + try_call!(raw::git_merge_base(&mut raw, self.raw, + one.raw(), two.raw())); + Ok(Binding::from_raw(&raw as *const _)) + } + } + + /// Find all merge bases between two commits + pub fn merge_bases(&self, one: Oid, two: Oid) -> Result { + let mut arr = raw::git_oidarray { + ids: 0 as *mut raw::git_oid, + count: 0, + }; + unsafe { + try_call!(raw::git_merge_bases(&mut arr, self.raw, + one.raw(), two.raw())); + Ok(Binding::from_raw(arr)) + } + } + + + /// Count the number of unique commits between two commit objects + /// + /// There is no need for branches containing the commits to have any + /// upstream relationship, but it helps to think of one as a branch and the + /// other as its 
upstream, the ahead and behind values will be what git + /// would report for the branches. + pub fn graph_ahead_behind(&self, local: Oid, upstream: Oid) + -> Result<(usize, usize), Error> { + unsafe { + let mut ahead: size_t = 0; + let mut behind: size_t = 0; + try_call!(raw::git_graph_ahead_behind(&mut ahead, &mut behind, + self.raw(), local.raw(), + upstream.raw())); + Ok((ahead as usize, behind as usize)) + } + } + + /// Determine if a commit is the descendant of another commit + pub fn graph_descendant_of(&self, commit: Oid, ancestor: Oid) + -> Result { + unsafe { + let rv = try_call!(raw::git_graph_descendant_of(self.raw(), + commit.raw(), + ancestor.raw())); + Ok(rv != 0) + } + } + + /// Read the reflog for the given reference + /// + /// If there is no reflog file for the given reference yet, an empty reflog + /// object will be returned. + pub fn reflog(&self, name: &str) -> Result { + let name = try!(CString::new(name)); + let mut ret = 0 as *mut raw::git_reflog; + unsafe { + try_call!(raw::git_reflog_read(&mut ret, self.raw, name)); + Ok(Binding::from_raw(ret)) + } + } + + /// Delete the reflog for the given reference + pub fn reflog_delete(&self, name: &str) -> Result<(), Error> { + let name = try!(CString::new(name)); + unsafe { try_call!(raw::git_reflog_delete(self.raw, name)); } + Ok(()) + } + + /// Rename a reflog + /// + /// The reflog to be renamed is expected to already exist. + pub fn reflog_rename(&self, old_name: &str, new_name: &str) + -> Result<(), Error> { + let old_name = try!(CString::new(old_name)); + let new_name = try!(CString::new(new_name)); + unsafe { + try_call!(raw::git_reflog_rename(self.raw, old_name, new_name)); + } + Ok(()) + } + + /// Check if the given reference has a reflog. + pub fn reference_has_log(&self, name: &str) -> Result { + let name = try!(CString::new(name)); + let ret = unsafe { + try_call!(raw::git_reference_has_log(self.raw, name)) + }; + Ok(ret != 0) + } + + /// Ensure that the given reference has a reflog. 
+ pub fn reference_ensure_log(&self, name: &str) -> Result<(), Error> { + let name = try!(CString::new(name)); + unsafe { + try_call!(raw::git_reference_ensure_log(self.raw, name)); + } + Ok(()) + } + + /// Describes a commit + /// + /// Performs a describe operation on the current commit and the worktree. + /// After performing a describe on HEAD, a status is run and description is + /// considered to be dirty if there are. + pub fn describe(&self, opts: &DescribeOptions) -> Result { + let mut ret = 0 as *mut _; + unsafe { + try_call!(raw::git_describe_workdir(&mut ret, self.raw, opts.raw())); + Ok(Binding::from_raw(ret)) + } + } + + /// Create a diff with the difference between two tree objects. + /// + /// This is equivalent to `git diff ` + /// + /// The first tree will be used for the "old_file" side of the delta and the + /// second tree will be used for the "new_file" side of the delta. You can + /// pass `None` to indicate an empty tree, although it is an error to pass + /// `None` for both the `old_tree` and `new_tree`. + pub fn diff_tree_to_tree(&self, + old_tree: Option<&Tree>, + new_tree: Option<&Tree>, + opts: Option<&mut DiffOptions>) + -> Result { + let mut ret = 0 as *mut raw::git_diff; + unsafe { + try_call!(raw::git_diff_tree_to_tree(&mut ret, + self.raw(), + old_tree.map(|s| s.raw()), + new_tree.map(|s| s.raw()), + opts.map(|s| s.raw()))); + Ok(Binding::from_raw(ret)) + } + } + + /// Create a diff between a tree and repository index. + /// + /// This is equivalent to `git diff --cached ` or if you pass + /// the HEAD tree, then like `git diff --cached`. + /// + /// The tree you pass will be used for the "old_file" side of the delta, and + /// the index will be used for the "new_file" side of the delta. + /// + /// If you pass `None` for the index, then the existing index of the `repo` + /// will be used. In this case, the index will be refreshed from disk + /// (if it has changed) before the diff is generated. 
+ /// + /// If the tree is `None`, then it is considered an empty tree. + pub fn diff_tree_to_index(&self, + old_tree: Option<&Tree>, + index: Option<&Index>, + opts: Option<&mut DiffOptions>) + -> Result { + let mut ret = 0 as *mut raw::git_diff; + unsafe { + try_call!(raw::git_diff_tree_to_index(&mut ret, + self.raw(), + old_tree.map(|s| s.raw()), + index.map(|s| s.raw()), + opts.map(|s| s.raw()))); + Ok(Binding::from_raw(ret)) + } + } + + /// Create a diff between two index objects. + /// + /// The first index will be used for the "old_file" side of the delta, and + /// the second index will be used for the "new_file" side of the delta. + pub fn diff_index_to_index(&self, + old_index: &Index, + new_index: &Index, + opts: Option<&mut DiffOptions>) + -> Result { + let mut ret = 0 as *mut raw::git_diff; + unsafe { + try_call!(raw::git_diff_index_to_index(&mut ret, + self.raw(), + old_index.raw(), + new_index.raw(), + opts.map(|s| s.raw()))); + Ok(Binding::from_raw(ret)) + } + } + + /// Create a diff between the repository index and the workdir directory. + /// + /// This matches the `git diff` command. See the note below on + /// `tree_to_workdir` for a discussion of the difference between + /// `git diff` and `git diff HEAD` and how to emulate a `git diff ` + /// using libgit2. + /// + /// The index will be used for the "old_file" side of the delta, and the + /// working directory will be used for the "new_file" side of the delta. + /// + /// If you pass `None` for the index, then the existing index of the `repo` + /// will be used. In this case, the index will be refreshed from disk + /// (if it has changed) before the diff is generated. 
+ pub fn diff_index_to_workdir(&self, + index: Option<&Index>, + opts: Option<&mut DiffOptions>) + -> Result { + let mut ret = 0 as *mut raw::git_diff; + unsafe { + try_call!(raw::git_diff_index_to_workdir(&mut ret, + self.raw(), + index.map(|s| s.raw()), + opts.map(|s| s.raw()))); + Ok(Binding::from_raw(ret)) + } + } + + /// Create a diff between a tree and the working directory. + /// + /// The tree you provide will be used for the "old_file" side of the delta, + /// and the working directory will be used for the "new_file" side. + /// + /// This is not the same as `git diff ` or `git diff-index + /// `. Those commands use information from the index, whereas this + /// function strictly returns the differences between the tree and the files + /// in the working directory, regardless of the state of the index. Use + /// `tree_to_workdir_with_index` to emulate those commands. + /// + /// To see difference between this and `tree_to_workdir_with_index`, + /// consider the example of a staged file deletion where the file has then + /// been put back into the working dir and further modified. The + /// tree-to-workdir diff for that file is 'modified', but `git diff` would + /// show status 'deleted' since there is a staged delete. + /// + /// If `None` is passed for `tree`, then an empty tree is used. + pub fn diff_tree_to_workdir(&self, + old_tree: Option<&Tree>, + opts: Option<&mut DiffOptions>) + -> Result { + let mut ret = 0 as *mut raw::git_diff; + unsafe { + try_call!(raw::git_diff_tree_to_workdir(&mut ret, + self.raw(), + old_tree.map(|s| s.raw()), + opts.map(|s| s.raw()))); + Ok(Binding::from_raw(ret)) + } + } + + /// Create a diff between a tree and the working directory using index data + /// to account for staged deletes, tracked files, etc. + /// + /// This emulates `git diff ` by diffing the tree to the index and + /// the index to the working directory and blending the results into a + /// single diff that includes staged deleted, etc. 
+ pub fn diff_tree_to_workdir_with_index(&self, + old_tree: Option<&Tree>, + opts: Option<&mut DiffOptions>) + -> Result { + let mut ret = 0 as *mut raw::git_diff; + unsafe { + try_call!(raw::git_diff_tree_to_workdir_with_index(&mut ret, + self.raw(), old_tree.map(|s| s.raw()), opts.map(|s| s.raw()))); + Ok(Binding::from_raw(ret)) + } + } + + /// Create a PackBuilder + pub fn packbuilder(&self) -> Result { + let mut ret = 0 as *mut raw::git_packbuilder; + unsafe { + try_call!(raw::git_packbuilder_new(&mut ret, self.raw())); + Ok(Binding::from_raw(ret)) + } + } + + /// Save the local modifications to a new stash. + pub fn stash_save(&mut self, + stasher: &Signature, + message: &str, + flags: Option) + -> Result { + unsafe { + let mut raw_oid = raw::git_oid { id: [0; raw::GIT_OID_RAWSZ] }; + let message = try!(CString::new(message)); + let flags = flags.unwrap_or(StashFlags::empty()); + try_call!(raw::git_stash_save(&mut raw_oid, + self.raw(), + stasher.raw(), + message, + flags.bits() as c_uint)); + Ok(Binding::from_raw(&raw_oid as *const _)) + } + } + + /// Apply a single stashed state from the stash list. + pub fn stash_apply(&mut self, + index: usize, + opts: Option<&mut StashApplyOptions>) + -> Result<(), Error> { + unsafe { + let opts = opts.map(|opts| opts.raw()); + try_call!(raw::git_stash_apply(self.raw(), index, opts)); + Ok(()) + } + } + + /// Loop over all the stashed states and issue a callback for each one. + /// + /// Return `true` to continue iterating or `false` to stop. + pub fn stash_foreach(&mut self, mut callback: C) -> Result<(), Error> + where C: FnMut(usize, &str, &Oid) -> bool + { + unsafe { + let mut data = StashCbData { callback: &mut callback }; + try_call!(raw::git_stash_foreach(self.raw(), + stash_cb, + &mut data as *mut _ as *mut _)); + Ok(()) + } + } + + /// Remove a single stashed state from the stash list. 
+ pub fn stash_drop(&mut self, index: usize) -> Result<(), Error> { + unsafe { + try_call!(raw::git_stash_drop(self.raw(), index)); + Ok(()) + } + } + + /// Apply a single stashed state from the stash list and remove it from the list if successful. + pub fn stash_pop(&mut self, + index: usize, + opts: Option<&mut StashApplyOptions>) + -> Result<(), Error> { + unsafe { + let opts = opts.map(|opts| opts.raw()); + try_call!(raw::git_stash_pop(self.raw(), index, opts)); + Ok(()) + } + } +} + +impl Binding for Repository { + type Raw = *mut raw::git_repository; + unsafe fn from_raw(ptr: *mut raw::git_repository) -> Repository { + Repository { raw: ptr } + } + fn raw(&self) -> *mut raw::git_repository { self.raw } +} + +impl Drop for Repository { + fn drop(&mut self) { + unsafe { raw::git_repository_free(self.raw) } + } +} + +impl RepositoryInitOptions { + /// Creates a default set of initialization options. + /// + /// By default this will set flags for creating all necessary directories + /// and initializing a directory from the user-configured templates path. + pub fn new() -> RepositoryInitOptions { + RepositoryInitOptions { + flags: raw::GIT_REPOSITORY_INIT_MKDIR as u32 | + raw::GIT_REPOSITORY_INIT_MKPATH as u32 | + raw::GIT_REPOSITORY_INIT_EXTERNAL_TEMPLATE as u32, + mode: 0, + workdir_path: None, + description: None, + template_path: None, + initial_head: None, + origin_url: None, + } + } + + /// Create a bare repository with no working directory. + /// + /// Defaults to false. + pub fn bare(&mut self, bare: bool) -> &mut RepositoryInitOptions { + self.flag(raw::GIT_REPOSITORY_INIT_BARE, bare) + } + + /// Return an error if the repository path appears to already be a git + /// repository. + /// + /// Defaults to false. 
+ pub fn no_reinit(&mut self, enabled: bool) -> &mut RepositoryInitOptions { + self.flag(raw::GIT_REPOSITORY_INIT_NO_REINIT, enabled) + } + + /// Normally a '/.git/' will be appended to the repo path for non-bare repos + /// (if it is not already there), but passing this flag prevents that + /// behavior. + /// + /// Defaults to false. + pub fn no_dotgit_dir(&mut self, enabled: bool) -> &mut RepositoryInitOptions { + self.flag(raw::GIT_REPOSITORY_INIT_NO_DOTGIT_DIR, enabled) + } + + /// Make the repo path (and workdir path) as needed. The ".git" directory + /// will always be created regardless of this flag. + /// + /// Defaults to true. + pub fn mkdir(&mut self, enabled: bool) -> &mut RepositoryInitOptions { + self.flag(raw::GIT_REPOSITORY_INIT_MKDIR, enabled) + } + + /// Recursively make all components of the repo and workdir paths as + /// necessary. + /// + /// Defaults to true. + pub fn mkpath(&mut self, enabled: bool) -> &mut RepositoryInitOptions { + self.flag(raw::GIT_REPOSITORY_INIT_MKPATH, enabled) + } + + /// Set to one of the `RepositoryInit` constants, or a custom value. + pub fn mode(&mut self, mode: RepositoryInitMode) + -> &mut RepositoryInitOptions { + self.mode = mode.bits(); + self + } + + /// Enable or disable using external templates. + /// + /// If enabled, then the `template_path` option will be queried first, then + /// `init.templatedir` from the global config, and finally + /// `/usr/share/git-core-templates` will be used (if it exists). + /// + /// Defaults to true. + pub fn external_template(&mut self, enabled: bool) + -> &mut RepositoryInitOptions { + self.flag(raw::GIT_REPOSITORY_INIT_EXTERNAL_TEMPLATE, enabled) + } + + fn flag(&mut self, flag: raw::git_repository_init_flag_t, on: bool) + -> &mut RepositoryInitOptions { + if on { + self.flags |= flag as u32; + } else { + self.flags &= !(flag as u32); + } + self + } + + /// The path to the working directory. 
+ /// + /// If this is a relative path it will be evaluated relative to the repo + /// path. If this is not the "natural" working directory, a .git gitlink + /// file will be created here linking to the repo path. + pub fn workdir_path(&mut self, path: &Path) -> &mut RepositoryInitOptions { + self.workdir_path = Some(path.into_c_string().unwrap()); + self + } + + /// If set, this will be used to initialize the "description" file in the + /// repository instead of using the template content. + pub fn description(&mut self, desc: &str) -> &mut RepositoryInitOptions { + self.description = Some(CString::new(desc).unwrap()); + self + } + + /// When the `external_template` option is set, this is the first location + /// to check for the template directory. + /// + /// If this is not configured, then the default locations will be searched + /// instead. + pub fn template_path(&mut self, path: &Path) -> &mut RepositoryInitOptions { + self.template_path = Some(path.into_c_string().unwrap()); + self + } + + /// The name of the head to point HEAD at. + /// + /// If not configured, this will be treated as `master` and the HEAD ref + /// will be set to `refs/heads/master`. If this begins with `refs/` it will + /// be used verbatim; otherwise `refs/heads/` will be prefixed. + pub fn initial_head(&mut self, head: &str) -> &mut RepositoryInitOptions { + self.initial_head = Some(CString::new(head).unwrap()); + self + } + + /// If set, then after the rest of the repository initialization is + /// completed an `origin` remote will be added pointing to this URL. + pub fn origin_url(&mut self, url: &str) -> &mut RepositoryInitOptions { + self.origin_url = Some(CString::new(url).unwrap()); + self + } + + /// Creates a set of raw init options to be used with + /// `git_repository_init_ext`. + /// + /// This method is unsafe as the returned value may have pointers to the + /// interior of this structure. 
+ pub unsafe fn raw(&self) -> raw::git_repository_init_options { + let mut opts = mem::zeroed(); + assert_eq!(raw::git_repository_init_init_options(&mut opts, + raw::GIT_REPOSITORY_INIT_OPTIONS_VERSION), 0); + opts.flags = self.flags; + opts.mode = self.mode; + opts.workdir_path = ::call::convert(&self.workdir_path); + opts.description = ::call::convert(&self.description); + opts.template_path = ::call::convert(&self.template_path); + opts.initial_head = ::call::convert(&self.initial_head); + opts.origin_url = ::call::convert(&self.origin_url); + return opts; + } +} + +#[cfg(test)] +mod tests { + use std::ffi::OsStr; + use std::fs; + use std::path::Path; + use tempdir::TempDir; + use {Repository, Oid, ObjectType, ResetType}; + use build::CheckoutBuilder; + + #[test] + fn smoke_init() { + let td = TempDir::new("test").unwrap(); + let path = td.path(); + + let repo = Repository::init(path).unwrap(); + assert!(!repo.is_bare()); + } + + #[test] + fn smoke_init_bare() { + let td = TempDir::new("test").unwrap(); + let path = td.path(); + + let repo = Repository::init_bare(path).unwrap(); + assert!(repo.is_bare()); + assert!(repo.namespace().is_none()); + } + + #[test] + fn smoke_open() { + let td = TempDir::new("test").unwrap(); + let path = td.path(); + Repository::init(td.path()).unwrap(); + let repo = Repository::open(path).unwrap(); + assert!(!repo.is_bare()); + assert!(!repo.is_shallow()); + assert!(repo.is_empty().unwrap()); + assert_eq!(::test::realpath(&repo.path()).unwrap(), + ::test::realpath(&td.path().join(".git/")).unwrap()); + assert_eq!(repo.state(), ::RepositoryState::Clean); + } + + #[test] + fn smoke_open_bare() { + let td = TempDir::new("test").unwrap(); + let path = td.path(); + Repository::init_bare(td.path()).unwrap(); + + let repo = Repository::open(path).unwrap(); + assert!(repo.is_bare()); + assert_eq!(::test::realpath(&repo.path()).unwrap(), + ::test::realpath(&td.path().join("")).unwrap()); + } + + #[test] + fn smoke_checkout() { + let (_td, 
repo) = ::test::repo_init(); + repo.checkout_head(None).unwrap(); + } + + #[test] + fn smoke_revparse() { + let (_td, repo) = ::test::repo_init(); + let rev = repo.revparse("HEAD").unwrap(); + assert!(rev.to().is_none()); + let from = rev.from().unwrap(); + assert!(rev.from().is_some()); + + assert_eq!(repo.revparse_single("HEAD").unwrap().id(), from.id()); + let obj = repo.find_object(from.id(), None).unwrap().clone(); + obj.peel(ObjectType::Any).unwrap(); + obj.short_id().unwrap(); + repo.reset(&obj, ResetType::Hard, None).unwrap(); + let mut opts = CheckoutBuilder::new(); + t!(repo.reset(&obj, ResetType::Soft, Some(&mut opts))); + } + + #[test] + fn makes_dirs() { + let td = TempDir::new("foo").unwrap(); + Repository::init(&td.path().join("a/b/c/d")).unwrap(); + } + + #[test] + fn smoke_discover() { + let td = TempDir::new("test").unwrap(); + let subdir = td.path().join("subdi"); + fs::create_dir(&subdir).unwrap(); + Repository::init_bare(td.path()).unwrap(); + let repo = Repository::discover(&subdir).unwrap(); + assert_eq!(::test::realpath(&repo.path()).unwrap(), + ::test::realpath(&td.path().join("")).unwrap()); + } + + #[test] + fn smoke_open_ext() { + let td = TempDir::new("test").unwrap(); + let subdir = td.path().join("subdir"); + fs::create_dir(&subdir).unwrap(); + Repository::init(td.path()).unwrap(); + + let repo = Repository::open_ext(&subdir, ::RepositoryOpenFlags::empty(), &[] as &[&OsStr]).unwrap(); + assert!(!repo.is_bare()); + assert_eq!(::test::realpath(&repo.path()).unwrap(), + ::test::realpath(&td.path().join(".git")).unwrap()); + + let repo = Repository::open_ext(&subdir, ::REPOSITORY_OPEN_BARE, &[] as &[&OsStr]).unwrap(); + assert!(repo.is_bare()); + assert_eq!(::test::realpath(&repo.path()).unwrap(), + ::test::realpath(&td.path().join(".git")).unwrap()); + + let err = Repository::open_ext(&subdir, ::REPOSITORY_OPEN_NO_SEARCH, &[] as &[&OsStr]).err().unwrap(); + assert_eq!(err.code(), ::ErrorCode::NotFound); + + 
assert!(Repository::open_ext(&subdir, + ::RepositoryOpenFlags::empty(), + &[&subdir]).is_ok()); + } + + fn graph_repo_init() -> (TempDir, Repository) { + let (_td, repo) = ::test::repo_init(); + { + let head = repo.head().unwrap().target().unwrap(); + let head = repo.find_commit(head).unwrap(); + + let mut index = repo.index().unwrap(); + let id = index.write_tree().unwrap(); + + let tree = repo.find_tree(id).unwrap(); + let sig = repo.signature().unwrap(); + repo.commit(Some("HEAD"), &sig, &sig, "second", + &tree, &[&head]).unwrap(); + } + (_td, repo) + } + + #[test] + fn smoke_graph_ahead_behind() { + let (_td, repo) = graph_repo_init(); + let head = repo.head().unwrap().target().unwrap(); + let head = repo.find_commit(head).unwrap(); + let head_id = head.id(); + let head_parent_id = head.parent(0).unwrap().id(); + let (ahead, behind) = repo.graph_ahead_behind(head_id, + head_parent_id).unwrap(); + assert_eq!(ahead, 1); + assert_eq!(behind, 0); + let (ahead, behind) = repo.graph_ahead_behind(head_parent_id, + head_id).unwrap(); + assert_eq!(ahead, 0); + assert_eq!(behind, 1); + } + + #[test] + fn smoke_graph_descendant_of() { + let (_td, repo) = graph_repo_init(); + let head = repo.head().unwrap().target().unwrap(); + let head = repo.find_commit(head).unwrap(); + let head_id = head.id(); + let head_parent_id = head.parent(0).unwrap().id(); + assert!(repo.graph_descendant_of(head_id, head_parent_id).unwrap()); + assert!(!repo.graph_descendant_of(head_parent_id, head_id).unwrap()); + } + + #[test] + fn smoke_reference_has_log_ensure_log() { + let (_td, repo) = ::test::repo_init(); + + assert_eq!(repo.reference_has_log("HEAD").unwrap(), true); + assert_eq!(repo.reference_has_log("refs/heads/master").unwrap(), true); + assert_eq!(repo.reference_has_log("NOT_HEAD").unwrap(), false); + let master_oid = repo.revparse_single("master").unwrap().id(); + assert!(repo.reference("NOT_HEAD", master_oid, false, "creating a new branch").is_ok()); + 
assert_eq!(repo.reference_has_log("NOT_HEAD").unwrap(), false); + assert!(repo.reference_ensure_log("NOT_HEAD").is_ok()); + assert_eq!(repo.reference_has_log("NOT_HEAD").unwrap(), true); + } + + #[test] + fn smoke_set_head() { + let (_td, repo) = ::test::repo_init(); + + assert!(repo.set_head("refs/heads/does-not-exist").is_ok()); + assert!(repo.head().is_err()); + + assert!(repo.set_head("refs/heads/master").is_ok()); + assert!(repo.head().is_ok()); + + assert!(repo.set_head("*").is_err()); + } + + #[test] + fn smoke_set_head_detached() { + let (_td, repo) = ::test::repo_init(); + + let void_oid = Oid::from_bytes(b"00000000000000000000").unwrap(); + assert!(repo.set_head_detached(void_oid).is_err()); + + let master_oid = repo.revparse_single("master").unwrap().id(); + assert!(repo.set_head_detached(master_oid).is_ok()); + assert_eq!(repo.head().unwrap().target().unwrap(), master_oid); + } + + /// create an octopus: + /// /---o2-o4 + /// o1 X + /// \---o3-o5 + /// and checks that the merge bases of (o4,o5) are (o2,o3) + #[test] + fn smoke_merge_bases() { + let (_td, repo) = graph_repo_init(); + let sig = repo.signature().unwrap(); + + // let oid1 = head + let oid1 = repo.head().unwrap().target().unwrap(); + let commit1 = repo.find_commit(oid1).unwrap(); + println!("created oid1 {:?}", oid1); + + repo.branch("branch_a", &commit1, true).unwrap(); + repo.branch("branch_b", &commit1, true).unwrap(); + + // create commit oid2 on branchA + let mut index = repo.index().unwrap(); + let p = Path::new(repo.workdir().unwrap()).join("file_a"); + println!("using path {:?}", p); + fs::File::create(&p).unwrap(); + index.add_path(Path::new("file_a")).unwrap(); + let id_a = index.write_tree().unwrap(); + let tree_a = repo.find_tree(id_a).unwrap(); + let oid2 = repo.commit(Some("refs/heads/branch_a"), &sig, &sig, + "commit 2", &tree_a, &[&commit1]).unwrap(); + let commit2 = repo.find_commit(oid2).unwrap(); + println!("created oid2 {:?}", oid2); + + t!(repo.reset(commit1.as_object(), 
ResetType::Hard, None)); + + // create commit oid3 on branchB + let mut index = repo.index().unwrap(); + let p = Path::new(repo.workdir().unwrap()).join("file_b"); + fs::File::create(&p).unwrap(); + index.add_path(Path::new("file_b")).unwrap(); + let id_b = index.write_tree().unwrap(); + let tree_b = repo.find_tree(id_b).unwrap(); + let oid3 = repo.commit(Some("refs/heads/branch_b"), &sig, &sig, + "commit 3", &tree_b, &[&commit1]).unwrap(); + let commit3 = repo.find_commit(oid3).unwrap(); + println!("created oid3 {:?}", oid3); + + // create merge commit oid4 on branchA with parents oid2 and oid3 + //let mut index4 = repo.merge_commits(&commit2, &commit3, None).unwrap(); + repo.set_head("refs/heads/branch_a").unwrap(); + repo.checkout_head(None).unwrap(); + let oid4 = repo.commit(Some("refs/heads/branch_a"), &sig, &sig, + "commit 4", &tree_a, + &[&commit2, &commit3]).unwrap(); + //index4.write_tree_to(&repo).unwrap(); + println!("created oid4 {:?}", oid4); + + // create merge commit oid5 on branchB with parents oid2 and oid3 + //let mut index5 = repo.merge_commits(&commit3, &commit2, None).unwrap(); + repo.set_head("refs/heads/branch_b").unwrap(); + repo.checkout_head(None).unwrap(); + let oid5 = repo.commit(Some("refs/heads/branch_b"), &sig, &sig, + "commit 5", &tree_a, + &[&commit3, &commit2]).unwrap(); + //index5.write_tree_to(&repo).unwrap(); + println!("created oid5 {:?}", oid5); + + // merge bases of (oid4,oid5) should be (oid2,oid3) + let merge_bases = repo.merge_bases(oid4, oid5).unwrap(); + let mut found_oid2 = false; + let mut found_oid3 = false; + for mg in merge_bases.iter() { + println!("found merge base {:?}", mg); + if mg == &oid2 { + found_oid2 = true; + } else if mg == &oid3 { + found_oid3 = true; + } else { + assert!(false); + } + } + assert!(found_oid2); + assert!(found_oid3); + assert_eq!(merge_bases.len(), 2); + } + + #[test] + fn smoke_revparse_ext() { + let (_td, repo) = graph_repo_init(); + + { + let short_refname = "master"; + let 
expected_refname = "refs/heads/master"; + let (obj, reference) = repo.revparse_ext(short_refname).unwrap(); + let expected_obj = repo.revparse_single(expected_refname).unwrap(); + assert_eq!(obj.id(), expected_obj.id()); + assert_eq!(reference.unwrap().name().unwrap(), expected_refname); + } + { + let missing_refname = "refs/heads/does-not-exist"; + assert!(repo.revparse_ext(missing_refname).is_err()); + } + { + let (_obj, reference) = repo.revparse_ext("HEAD^").unwrap(); + assert!(reference.is_none()); + } + } +} diff -Nru cargo-0.17.0/vendor/git2-0.6.4/src/revspec.rs cargo-0.19.0/vendor/git2-0.6.4/src/revspec.rs --- cargo-0.17.0/vendor/git2-0.6.4/src/revspec.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/git2-0.6.4/src/revspec.rs 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,26 @@ +use {Object, RevparseMode}; + +/// A revspec represents a range of revisions within a repository. +pub struct Revspec<'repo> { + from: Option>, + to: Option>, + mode: RevparseMode, +} + +impl<'repo> Revspec<'repo> { + /// Assembles a new revspec from the from/to components. + pub fn from_objects(from: Option>, + to: Option>, + mode: RevparseMode) -> Revspec<'repo> { + Revspec { from: from, to: to, mode: mode } + } + + /// Access the `from` range of this revspec. + pub fn from(&self) -> Option<&Object<'repo>> { self.from.as_ref() } + + /// Access the `to` range of this revspec. + pub fn to(&self) -> Option<&Object<'repo>> { self.to.as_ref() } + + /// Returns the intent of the revspec. 
+ pub fn mode(&self) -> RevparseMode { self.mode } +} diff -Nru cargo-0.17.0/vendor/git2-0.6.4/src/revwalk.rs cargo-0.19.0/vendor/git2-0.6.4/src/revwalk.rs --- cargo-0.17.0/vendor/git2-0.6.4/src/revwalk.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/git2-0.6.4/src/revwalk.rs 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,203 @@ +use std::marker; +use std::ffi::CString; +use libc::c_uint; + +use {raw, Error, Sort, Oid, Repository}; +use util::Binding; + +/// A revwalk allows traversal of the commit graph defined by including one or +/// more leaves and excluding one or more roots. +pub struct Revwalk<'repo> { + raw: *mut raw::git_revwalk, + _marker: marker::PhantomData<&'repo Repository>, +} + +impl<'repo> Revwalk<'repo> { + /// Reset a revwalk to allow re-configuring it. + /// + /// The revwalk is automatically reset when iteration of its commits + /// completes. + pub fn reset(&mut self) { + unsafe { raw::git_revwalk_reset(self.raw()) } + } + + /// Set the order in which commits are visited. + pub fn set_sorting(&mut self, sort_mode: Sort) { + unsafe { + raw::git_revwalk_sorting(self.raw(), sort_mode.bits() as c_uint) + } + } + + /// Simplify the history by first-parent + /// + /// No parents other than the first for each commit will be enqueued. + pub fn simplify_first_parent(&mut self) { + unsafe { raw::git_revwalk_simplify_first_parent(self.raw) } + } + + /// Mark a commit to start traversal from. + /// + /// The given OID must belong to a committish on the walked repository. + /// + /// The given commit will be used as one of the roots when starting the + /// revision walk. At least one commit must be pushed onto the walker before + /// a walk can be started. + pub fn push(&mut self, oid: Oid) -> Result<(), Error> { + unsafe { + try_call!(raw::git_revwalk_push(self.raw(), oid.raw())); + } + Ok(()) + } + + /// Push the repository's HEAD + /// + /// For more information, see `push`. 
+ pub fn push_head(&mut self) -> Result<(), Error> { + unsafe { + try_call!(raw::git_revwalk_push_head(self.raw())); + } + Ok(()) + } + + /// Push matching references + /// + /// The OIDs pointed to by the references that match the given glob pattern + /// will be pushed to the revision walker. + /// + /// A leading 'refs/' is implied if not present as well as a trailing `/ \ + /// *` if the glob lacks '?', ' \ *' or '['. + /// + /// Any references matching this glob which do not point to a committish + /// will be ignored. + pub fn push_glob(&mut self, glob: &str) -> Result<(), Error> { + let glob = try!(CString::new(glob)); + unsafe { + try_call!(raw::git_revwalk_push_glob(self.raw, glob)); + } + Ok(()) + } + + /// Push and hide the respective endpoints of the given range. + /// + /// The range should be of the form `..` where each + /// `` is in the form accepted by `revparse_single`. The left-hand + /// commit will be hidden and the right-hand commit pushed. + pub fn push_range(&mut self, range: &str) -> Result<(), Error> { + let range = try!(CString::new(range)); + unsafe { + try_call!(raw::git_revwalk_push_range(self.raw, range)); + } + Ok(()) + } + + /// Push the OID pointed to by a reference + /// + /// The reference must point to a committish. + pub fn push_ref(&mut self, reference: &str) -> Result<(), Error> { + let reference = try!(CString::new(reference)); + unsafe { + try_call!(raw::git_revwalk_push_ref(self.raw, reference)); + } + Ok(()) + } + + /// Mark a commit as not of interest to this revwalk. + pub fn hide(&mut self, oid: Oid) -> Result<(), Error> { + unsafe { + try_call!(raw::git_revwalk_hide(self.raw(), oid.raw())); + } + Ok(()) + } + + /// Hide the repository's HEAD + /// + /// For more information, see `hide`. + pub fn hide_head(&mut self) -> Result<(), Error> { + unsafe { + try_call!(raw::git_revwalk_hide_head(self.raw())); + } + Ok(()) + } + + /// Hide matching references. 
+ /// + /// The OIDs pointed to by the references that match the given glob pattern + /// and their ancestors will be hidden from the output on the revision walk. + /// + /// A leading 'refs/' is implied if not present as well as a trailing `/ \ + /// *` if the glob lacks '?', ' \ *' or '['. + /// + /// Any references matching this glob which do not point to a committish + /// will be ignored. + pub fn hide_glob(&mut self, glob: &str) -> Result<(), Error> { + let glob = try!(CString::new(glob)); + unsafe { + try_call!(raw::git_revwalk_hide_glob(self.raw, glob)); + } + Ok(()) + } + + /// Hide the OID pointed to by a reference. + /// + /// The reference must point to a committish. + pub fn hide_ref(&mut self, reference: &str) -> Result<(), Error> { + let reference = try!(CString::new(reference)); + unsafe { + try_call!(raw::git_revwalk_hide_ref(self.raw, reference)); + } + Ok(()) + } +} + +impl<'repo> Binding for Revwalk<'repo> { + type Raw = *mut raw::git_revwalk; + unsafe fn from_raw(raw: *mut raw::git_revwalk) -> Revwalk<'repo> { + Revwalk { raw: raw, _marker: marker::PhantomData } + } + fn raw(&self) -> *mut raw::git_revwalk { self.raw } +} + +impl<'repo> Drop for Revwalk<'repo> { + fn drop(&mut self) { + unsafe { raw::git_revwalk_free(self.raw) } + } +} + +impl<'repo> Iterator for Revwalk<'repo> { + type Item = Result; + fn next(&mut self) -> Option> { + let mut out: raw::git_oid = raw::git_oid{ id: [0; raw::GIT_OID_RAWSZ] }; + unsafe { + try_call_iter!(raw::git_revwalk_next(&mut out, self.raw())); + Some(Ok(Binding::from_raw(&out as *const _))) + } + } +} + +#[cfg(test)] +mod tests { + #[test] + fn smoke() { + let (_td, repo) = ::test::repo_init(); + let head = repo.head().unwrap(); + let target = head.target().unwrap(); + + let mut walk = repo.revwalk().unwrap(); + walk.push(target).unwrap(); + + let oids: Vec<::Oid> = walk.by_ref().collect::, _>>() + .unwrap(); + + assert_eq!(oids.len(), 1); + assert_eq!(oids[0], target); + + walk.reset(); + 
walk.push_head().unwrap(); + assert_eq!(walk.by_ref().count(), 1); + + walk.reset(); + walk.push_head().unwrap(); + walk.hide_head().unwrap(); + assert_eq!(walk.by_ref().count(), 0); + } +} diff -Nru cargo-0.17.0/vendor/git2-0.6.4/src/signature.rs cargo-0.19.0/vendor/git2-0.6.4/src/signature.rs --- cargo-0.17.0/vendor/git2-0.6.4/src/signature.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/git2-0.6.4/src/signature.rs 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,174 @@ +use std::ffi::CString; +use std::marker; +use std::mem; +use std::str; +use std::fmt; +use libc; + +use {raw, Error, Time}; +use util::Binding; + +/// A Signature is used to indicate authorship of various actions throughout the +/// library. +/// +/// Signatures contain a name, email, and timestamp. All fields can be specified +/// with `new` while the `now` constructor omits the timestamp. The +/// [`Repository::signature`] method can be used to create a default signature +/// with name and email values read from the configuration. +/// +/// [`Repository::signature`]: struct.Repository.html#method.signature +pub struct Signature<'a> { + raw: *mut raw::git_signature, + _marker: marker::PhantomData<&'a str>, + owned: bool, +} + +impl<'a> Signature<'a> { + /// Create a new action signature with a timestamp of 'now'. + /// + /// See `new` for more information + pub fn now(name: &str, email: &str) -> Result, Error> { + ::init(); + let mut ret = 0 as *mut raw::git_signature; + let name = try!(CString::new(name)); + let email = try!(CString::new(email)); + unsafe { + try_call!(raw::git_signature_now(&mut ret, name, email)); + Ok(Binding::from_raw(ret)) + } + } + + /// Create a new action signature. + /// + /// The `time` specified is in seconds since the epoch, and the `offset` is + /// the time zone offset in minutes. + /// + /// Returns error if either `name` or `email` contain angle brackets. 
+ pub fn new(name: &str, email: &str, time: &Time) + -> Result, Error> { + ::init(); + let mut ret = 0 as *mut raw::git_signature; + let name = try!(CString::new(name)); + let email = try!(CString::new(email)); + unsafe { + try_call!(raw::git_signature_new(&mut ret, name, email, + time.seconds() as raw::git_time_t, + time.offset_minutes() as libc::c_int)); + Ok(Binding::from_raw(ret)) + } + } + + /// Gets the name on the signature. + /// + /// Returns `None` if the name is not valid utf-8 + pub fn name(&self) -> Option<&str> { + str::from_utf8(self.name_bytes()).ok() + } + + /// Gets the name on the signature as a byte slice. + pub fn name_bytes(&self) -> &[u8] { + unsafe { ::opt_bytes(self, (*self.raw).name).unwrap() } + } + + /// Gets the email on the signature. + /// + /// Returns `None` if the email is not valid utf-8 + pub fn email(&self) -> Option<&str> { + str::from_utf8(self.email_bytes()).ok() + } + + /// Gets the email on the signature as a byte slice. + pub fn email_bytes(&self) -> &[u8] { + unsafe { ::opt_bytes(self, (*self.raw).email).unwrap() } + } + + /// Get the `when` of this signature. + pub fn when(&self) -> Time { + unsafe { Binding::from_raw((*self.raw).when) } + } + + /// Convert a signature of any lifetime into an owned signature with a + /// static lifetime. + pub fn to_owned(&self) -> Signature<'static> { + unsafe { + let me = mem::transmute::<&Signature<'a>, &Signature<'static>>(self); + me.clone() + } + } +} + +impl<'a> Binding for Signature<'a> { + type Raw = *mut raw::git_signature; + unsafe fn from_raw(raw: *mut raw::git_signature) -> Signature<'a> { + Signature { + raw: raw, + _marker: marker::PhantomData, + owned: true, + } + } + fn raw(&self) -> *mut raw::git_signature { self.raw } +} + +/// Creates a new signature from the give raw pointer, tied to the lifetime +/// of the given object. +/// +/// This function is unsafe as there is no guarantee that `raw` is valid for +/// `'a` nor if it's a valid pointer. 
+pub unsafe fn from_raw_const<'b, T>(_lt: &'b T, + raw: *const raw::git_signature) + -> Signature<'b> { + Signature { + raw: raw as *mut raw::git_signature, + _marker: marker::PhantomData, + owned: false, + } +} + +impl Clone for Signature<'static> { + fn clone(&self) -> Signature<'static> { + // TODO: can this be defined for 'a and just do a plain old copy if the + // lifetime isn't static? + let mut raw = 0 as *mut raw::git_signature; + let rc = unsafe { raw::git_signature_dup(&mut raw, &*self.raw) }; + assert_eq!(rc, 0); + unsafe { Binding::from_raw(raw) } + } +} + +impl<'a> Drop for Signature<'a> { + fn drop(&mut self) { + if self.owned { + unsafe { raw::git_signature_free(self.raw) } + } + } +} + +impl<'a> fmt::Display for Signature<'a> { + + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{} <{}>", + String::from_utf8_lossy(self.name_bytes()), + String::from_utf8_lossy(self.email_bytes())) + } + +} + +#[cfg(test)] +mod tests { + use {Signature, Time}; + + #[test] + fn smoke() { + Signature::new("foo", "bar", &Time::new(89, 0)).unwrap(); + Signature::now("foo", "bar").unwrap(); + assert!(Signature::new("", "bar", &Time::new(89, 0)).is_err()); + assert!(Signature::now("", "bar").is_err()); + + let s = Signature::now("foo", "bar").unwrap(); + assert_eq!(s.name(), Some("foo")); + assert_eq!(s.email(), Some("bar")); + + drop(s.clone()); + drop(s.to_owned()); + } +} diff -Nru cargo-0.17.0/vendor/git2-0.6.4/src/stash.rs cargo-0.19.0/vendor/git2-0.6.4/src/stash.rs --- cargo-0.17.0/vendor/git2-0.6.4/src/stash.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/git2-0.6.4/src/stash.rs 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,204 @@ +use {raw, panic, Oid, StashApplyProgress}; +use std::ffi::{CStr}; +use util::{Binding}; +use libc::{c_int, c_char, size_t, c_void}; +use build::{CheckoutBuilder}; +use std::mem; + +/// Stash application progress notification function. 
+/// +/// Return `true` to continue processing, or `false` to +/// abort the stash application. +pub type StashApplyProgressCb<'a> = FnMut(StashApplyProgress) -> bool + 'a; + +/// This is a callback function you can provide to iterate over all the +/// stashed states that will be invoked per entry. +pub type StashCb<'a> = FnMut(usize, &str, &Oid) -> bool + 'a; + +#[allow(unused)] +/// Stash application options structure +pub struct StashApplyOptions<'cb> { + progress: Option>>, + checkout_options: Option>, + raw_opts: raw::git_stash_apply_options +} + +impl<'cb> StashApplyOptions<'cb> { + /// Creates a default set of merge options. + pub fn new() -> StashApplyOptions<'cb> { + let mut opts = StashApplyOptions { + progress: None, + checkout_options: None, + raw_opts: unsafe { mem::zeroed() }, + }; + assert_eq!(unsafe { + raw::git_stash_apply_init_options(&mut opts.raw_opts, 1) + }, 0); + opts + } + + /// Set stash application flag to GIT_STASH_APPLY_REINSTATE_INDEX + pub fn reinstantiate_index(&mut self) -> &mut StashApplyOptions<'cb> { + self.raw_opts.flags = raw::GIT_STASH_APPLY_REINSTATE_INDEX; + self + } + + /// Options to use when writing files to the working directory + pub fn checkout_options(&mut self, opts: CheckoutBuilder<'cb>) -> &mut StashApplyOptions<'cb> { + self.checkout_options = Some(opts); + self + } + + /// Optional callback to notify the consumer of application progress. + /// + /// Return `true` to continue processing, or `false` to + /// abort the stash application. 
+ pub fn progress_cb(&mut self, callback: C) -> &mut StashApplyOptions<'cb> + where C: FnMut(StashApplyProgress) -> bool + 'cb + { + self.progress = Some(Box::new(callback) as Box>); + self.raw_opts.progress_cb = stash_apply_progress_cb; + self.raw_opts.progress_payload = self as *mut _ as *mut _; + self + } + + /// Pointer to a raw git_stash_apply_options + pub fn raw(&mut self) -> &raw::git_stash_apply_options { + unsafe { + if let Some(opts) = self.checkout_options.as_mut() { + opts.configure(&mut self.raw_opts.checkout_options); + } + } + &self.raw_opts + } +} + +#[allow(unused)] +pub struct StashCbData<'a> { + pub callback: &'a mut StashCb<'a> +} + +#[allow(unused)] +pub extern fn stash_cb(index: size_t, + message: *const c_char, + stash_id: *const raw::git_oid, + payload: *mut c_void) + -> c_int +{ + panic::wrap(|| unsafe { + let mut data = &mut *(payload as *mut StashCbData); + let res = { + let mut callback = &mut data.callback; + callback(index, + CStr::from_ptr(message).to_str().unwrap(), + &Binding::from_raw(stash_id)) + }; + + if res { 0 } else { 1 } + }).unwrap_or(1) +} + +fn convert_progress(progress: raw::git_stash_apply_progress_t) -> StashApplyProgress { + match progress { + raw::GIT_STASH_APPLY_PROGRESS_NONE => StashApplyProgress::None, + raw::GIT_STASH_APPLY_PROGRESS_LOADING_STASH => StashApplyProgress::LoadingStash, + raw::GIT_STASH_APPLY_PROGRESS_ANALYZE_INDEX => StashApplyProgress::AnalyzeIndex, + raw::GIT_STASH_APPLY_PROGRESS_ANALYZE_MODIFIED => StashApplyProgress::AnalyzeModified, + raw::GIT_STASH_APPLY_PROGRESS_ANALYZE_UNTRACKED => StashApplyProgress::AnalyzeUntracked, + raw::GIT_STASH_APPLY_PROGRESS_CHECKOUT_UNTRACKED => StashApplyProgress::CheckoutUntracked, + raw::GIT_STASH_APPLY_PROGRESS_CHECKOUT_MODIFIED => StashApplyProgress::CheckoutModified, + raw::GIT_STASH_APPLY_PROGRESS_DONE => StashApplyProgress::Done, + + _ => StashApplyProgress::None + } +} + +#[allow(unused)] +extern fn stash_apply_progress_cb(progress: 
raw::git_stash_apply_progress_t, + payload: *mut c_void) + -> c_int +{ + panic::wrap(|| unsafe { + let mut options = &mut *(payload as *mut StashApplyOptions); + let res = { + let mut callback = options.progress.as_mut().unwrap(); + callback(convert_progress(progress)) + }; + + if res { 0 } else { -1 } + }).unwrap_or(-1) +} + +#[cfg(test)] +mod tests { + use stash::{StashApplyOptions}; + use std::io::{Write}; + use std::fs; + use std::path::Path; + use test::{repo_init}; + use {Repository, STATUS_WT_NEW, STASH_INCLUDE_UNTRACKED}; + + fn make_stash(next: C) where C: FnOnce(&mut Repository) { + let (_td, mut repo) = repo_init(); + let signature = repo.signature().unwrap(); + + let p = Path::new(repo.workdir().unwrap()).join("file_b.txt"); + println!("using path {:?}", p); + fs::File::create(&p).unwrap() + .write("data".as_bytes()).unwrap(); + + let rel_p = Path::new("file_b.txt"); + assert!(repo.status_file(&rel_p).unwrap() == STATUS_WT_NEW); + + repo.stash_save(&signature, "msg1", Some(STASH_INCLUDE_UNTRACKED)).unwrap(); + + assert!(repo.status_file(&rel_p).is_err()); + + let mut count = 0; + repo.stash_foreach(|index, name, _oid| { + count += 1; + assert!(index == 0); + assert!(name == "On master: msg1"); + true + }).unwrap(); + + assert!(count == 1); + next(&mut repo); + } + + fn count_stash(repo: &mut Repository) -> usize { + let mut count = 0; + repo.stash_foreach(|_, _, _| { count += 1; true }).unwrap(); + count + } + + #[test] + fn smoke_stash_save_drop() { + make_stash(|repo| { + repo.stash_drop(0).unwrap(); + assert!(count_stash(repo) == 0) + }) + } + + #[test] + fn smoke_stash_save_pop() { + make_stash(|repo| { + repo.stash_pop(0, None).unwrap(); + assert!(count_stash(repo) == 0) + }) + } + + #[test] + fn smoke_stash_save_apply() { + make_stash(|repo| { + let mut options = StashApplyOptions::new(); + options.progress_cb(|progress| { + println!("{:?}", progress); + true + }); + + repo.stash_apply(0, Some(&mut options)).unwrap(); + assert!(count_stash(repo) 
== 1) + }) + } +} \ No newline at end of file diff -Nru cargo-0.17.0/vendor/git2-0.6.4/src/status.rs cargo-0.19.0/vendor/git2-0.6.4/src/status.rs --- cargo-0.17.0/vendor/git2-0.6.4/src/status.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/git2-0.6.4/src/status.rs 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,396 @@ +use std::ffi::CString; +use std::ops::Range; +use std::marker; +use std::mem; +use std::str; +use libc::{c_char, size_t, c_uint}; + +use {raw, Status, DiffDelta, IntoCString, Repository}; +use util::Binding; + +/// Options that can be provided to `repo.statuses()` to control how the status +/// information is gathered. +pub struct StatusOptions { + raw: raw::git_status_options, + pathspec: Vec, + ptrs: Vec<*const c_char>, +} + +/// Enumeration of possible methods of what can be shown through a status +/// operation. +#[derive(Copy, Clone)] +pub enum StatusShow { + /// Only gives status based on HEAD to index comparison, not looking at + /// working directory changes. + Index, + + /// Only gives status based on index to working directory comparison, not + /// comparing the index to the HEAD. + Workdir, + + /// The default, this roughly matches `git status --porcelain` regarding + /// which files are included and in what order. + IndexAndWorkdir, +} + +/// A container for a list of status information about a repository. +/// +/// Each instances appears as a if it were a collection, having a length and +/// allowing indexing as well as provding an iterator. +pub struct Statuses<'repo> { + raw: *mut raw::git_status_list, + + // Hm, not currently present, but can't hurt? + _marker: marker::PhantomData<&'repo Repository>, +} + +/// An iterator over the statuses in a `Statuses` instance. +pub struct StatusIter<'statuses> { + statuses: &'statuses Statuses<'statuses>, + range: Range, +} + +/// A structure representing an entry in the `Statuses` structure. +/// +/// Instances are created through the `.iter()` method or the `.get()` method. 
+pub struct StatusEntry<'statuses> { + raw: *const raw::git_status_entry, + _marker: marker::PhantomData<&'statuses DiffDelta<'statuses>>, +} + +impl StatusOptions { + /// Creates a new blank set of status options. + pub fn new() -> StatusOptions { + unsafe { + let mut raw = mem::zeroed(); + let r = raw::git_status_init_options(&mut raw, + raw::GIT_STATUS_OPTIONS_VERSION); + assert_eq!(r, 0); + StatusOptions { + raw: raw, + pathspec: Vec::new(), + ptrs: Vec::new(), + } + } + } + + /// Select the files on which to report status. + /// + /// The default, if unspecified, is to show the index and the working + /// directory. + pub fn show(&mut self, show: StatusShow) -> &mut StatusOptions { + self.raw.show = match show { + StatusShow::Index => raw::GIT_STATUS_SHOW_INDEX_ONLY, + StatusShow::Workdir => raw::GIT_STATUS_SHOW_WORKDIR_ONLY, + StatusShow::IndexAndWorkdir => raw::GIT_STATUS_SHOW_INDEX_AND_WORKDIR, + }; + self + } + + /// Add a path pattern to match (using fnmatch-style matching). + /// + /// If the `disable_pathspec_match` option is given, then this is a literal + /// path to match. If this is not called, then there will be no patterns to + /// match and the entire directory will be used. + pub fn pathspec(&mut self, pathspec: T) + -> &mut StatusOptions { + let s = pathspec.into_c_string().unwrap(); + self.ptrs.push(s.as_ptr()); + self.pathspec.push(s); + self + } + + fn flag(&mut self, flag: raw::git_status_opt_t, val: bool) + -> &mut StatusOptions { + if val { + self.raw.flags |= flag as c_uint; + } else { + self.raw.flags &= !(flag as c_uint); + } + self + } + + /// Flag whether untracked files will be included. + /// + /// Untracked files will only be included if the workdir files are included + /// in the status "show" option. + pub fn include_untracked(&mut self, include: bool) -> &mut StatusOptions { + self.flag(raw::GIT_STATUS_OPT_INCLUDE_UNTRACKED, include) + } + + /// Flag whether ignored files will be included. 
+ /// + /// The files will only be included if the workdir files are included + /// in the status "show" option. + pub fn include_ignored(&mut self, include: bool) -> &mut StatusOptions { + self.flag(raw::GIT_STATUS_OPT_INCLUDE_IGNORED, include) + } + + /// Flag to include unmodified files. + pub fn include_unmodified(&mut self, include: bool) -> &mut StatusOptions { + self.flag(raw::GIT_STATUS_OPT_INCLUDE_UNMODIFIED, include) + } + + /// Flag that submodules should be skipped. + /// + /// This only applies if there are no pending typechanges to the submodule + /// (either from or to another type). + pub fn exclude_submodules(&mut self, exclude: bool) -> &mut StatusOptions { + self.flag(raw::GIT_STATUS_OPT_EXCLUDE_SUBMODULES, exclude) + } + + /// Flag that all files in untracked directories should be included. + /// + /// Normally if an entire directory is new then just the top-level directory + /// is included (with a trailing slash on the entry name). + pub fn recurse_untracked_dirs(&mut self, include: bool) + -> &mut StatusOptions { + self.flag(raw::GIT_STATUS_OPT_RECURSE_UNTRACKED_DIRS, include) + } + + /// Indicates that the given paths should be treated as literals paths, note + /// patterns. + pub fn disable_pathspec_match(&mut self, include: bool) + -> &mut StatusOptions { + self.flag(raw::GIT_STATUS_OPT_DISABLE_PATHSPEC_MATCH, include) + } + + /// Indicates that the contents of ignored directories should be included in + /// the status. + pub fn recurse_ignored_dirs(&mut self, include: bool) + -> &mut StatusOptions { + self.flag(raw::GIT_STATUS_OPT_RECURSE_IGNORED_DIRS, include) + } + + /// Indicates that rename detection should be processed between the head. + pub fn renames_head_to_index(&mut self, include: bool) + -> &mut StatusOptions { + self.flag(raw::GIT_STATUS_OPT_RENAMES_HEAD_TO_INDEX, include) + } + + /// Indicates that rename detection should be run between the index and the + /// working directory. 
+ pub fn renames_index_to_workdir(&mut self, include: bool) + -> &mut StatusOptions { + self.flag(raw::GIT_STATUS_OPT_RENAMES_INDEX_TO_WORKDIR, include) + } + + /// Override the native case sensitivity for the file system and force the + /// output to be in case sensitive order. + pub fn sort_case_sensitively(&mut self, include: bool) + -> &mut StatusOptions { + self.flag(raw::GIT_STATUS_OPT_SORT_CASE_SENSITIVELY, include) + } + + /// Override the native case sensitivity for the file system and force the + /// output to be in case-insensitive order. + pub fn sort_case_insensitively(&mut self, include: bool) + -> &mut StatusOptions { + self.flag(raw::GIT_STATUS_OPT_SORT_CASE_INSENSITIVELY, include) + } + + /// Indicates that rename detection should include rewritten files. + pub fn renames_from_rewrites(&mut self, include: bool) + -> &mut StatusOptions { + self.flag(raw::GIT_STATUS_OPT_RENAMES_FROM_REWRITES, include) + } + + /// Bypasses the default status behavior of doing a "soft" index reload. + pub fn no_refresh(&mut self, include: bool) -> &mut StatusOptions { + self.flag(raw::GIT_STATUS_OPT_NO_REFRESH, include) + } + + /// Refresh the stat cache in the index for files are unchanged but have + /// out of date stat information in the index. + /// + /// This will result in less work being done on subsequent calls to fetching + /// the status. + pub fn update_index(&mut self, include: bool) -> &mut StatusOptions { + self.flag(raw::GIT_STATUS_OPT_UPDATE_INDEX, include) + } + + // erm... + #[allow(missing_docs)] + pub fn include_unreadable(&mut self, include: bool) -> &mut StatusOptions { + self.flag(raw::GIT_STATUS_OPT_INCLUDE_UNREADABLE, include) + } + + // erm... + #[allow(missing_docs)] + pub fn include_unreadable_as_untracked(&mut self, include: bool) + -> &mut StatusOptions { + self.flag(raw::GIT_STATUS_OPT_INCLUDE_UNREADABLE_AS_UNTRACKED, include) + } + + /// Get a pointer to the inner list of status options. 
+ /// + /// This function is unsafe as the returned structure has interior pointers + /// and may no longer be valid if these options continue to be mutated. + pub unsafe fn raw(&mut self) -> *const raw::git_status_options { + self.raw.pathspec.strings = self.ptrs.as_ptr() as *mut _; + self.raw.pathspec.count = self.ptrs.len() as size_t; + &self.raw + } +} + +impl<'repo> Statuses<'repo> { + /// Gets a status entry from this list at the specified index. + /// + /// Returns `None` if the index is out of bounds. + pub fn get(&self, index: usize) -> Option { + unsafe { + let p = raw::git_status_byindex(self.raw, index as size_t); + Binding::from_raw_opt(p) + } + } + + /// Gets the count of status entries in this list. + /// + /// If there are no changes in status (at least according the options given + /// when the status list was created), this can return 0. + pub fn len(&self) -> usize { + unsafe { raw::git_status_list_entrycount(self.raw) as usize } + } + + /// Returns an iterator over the statuses in this list. 
+ pub fn iter(&self) -> StatusIter { + StatusIter { + statuses: self, + range: 0..self.len(), + } + } +} + +impl<'repo> Binding for Statuses<'repo> { + type Raw = *mut raw::git_status_list; + unsafe fn from_raw(raw: *mut raw::git_status_list) -> Statuses<'repo> { + Statuses { raw: raw, _marker: marker::PhantomData } + } + fn raw(&self) -> *mut raw::git_status_list { self.raw } +} + +impl<'repo> Drop for Statuses<'repo> { + fn drop(&mut self) { + unsafe { raw::git_status_list_free(self.raw); } + } +} + +impl<'a> Iterator for StatusIter<'a> { + type Item = StatusEntry<'a>; + fn next(&mut self) -> Option> { + self.range.next().and_then(|i| self.statuses.get(i)) + } + fn size_hint(&self) -> (usize, Option) { self.range.size_hint() } +} +impl<'a> DoubleEndedIterator for StatusIter<'a> { + fn next_back(&mut self) -> Option> { + self.range.next_back().and_then(|i| self.statuses.get(i)) + } +} +impl<'a> ExactSizeIterator for StatusIter<'a> {} + +impl<'statuses> StatusEntry<'statuses> { + /// Access the bytes for this entry's corresponding pathname + pub fn path_bytes(&self) -> &[u8] { + unsafe { + if (*self.raw).head_to_index.is_null() { + ::opt_bytes(self, (*(*self.raw).index_to_workdir).old_file.path) + } else { + ::opt_bytes(self, (*(*self.raw).head_to_index).old_file.path) + }.unwrap() + } + } + + /// Access this entry's path name as a string. + /// + /// Returns `None` if the path is not valid utf-8. + pub fn path(&self) -> Option<&str> { str::from_utf8(self.path_bytes()).ok() } + + /// Access the status flags for this file + pub fn status(&self) -> Status { + Status::from_bits_truncate(unsafe { (*self.raw).status as u32 }) + } + + /// Access detailed information about the differences between the file in + /// HEAD and the file in the index. 
+ pub fn head_to_index(&self) -> Option> { + unsafe { + Binding::from_raw_opt((*self.raw).head_to_index) + } + } + + /// Access detailed information about the differences between the file in + /// the index and the file in the working directory. + pub fn index_to_workdir(&self) -> Option> { + unsafe { + Binding::from_raw_opt((*self.raw).index_to_workdir) + } + } +} + +impl<'statuses> Binding for StatusEntry<'statuses> { + type Raw = *const raw::git_status_entry; + + unsafe fn from_raw(raw: *const raw::git_status_entry) + -> StatusEntry<'statuses> { + StatusEntry { raw: raw, _marker: marker::PhantomData } + } + fn raw(&self) -> *const raw::git_status_entry { self.raw } +} + +#[cfg(test)] +mod tests { + use std::fs::File; + use std::path::Path; + use std::io::prelude::*; + use super::StatusOptions; + + #[test] + fn smoke() { + let (td, repo) = ::test::repo_init(); + assert_eq!(repo.statuses(None).unwrap().len(), 0); + File::create(&td.path().join("foo")).unwrap(); + let statuses = repo.statuses(None).unwrap(); + assert_eq!(statuses.iter().count(), 1); + let status = statuses.iter().next().unwrap(); + assert_eq!(status.path(), Some("foo")); + assert!(status.status().contains(::STATUS_WT_NEW)); + assert!(!status.status().contains(::STATUS_INDEX_NEW)); + assert!(status.head_to_index().is_none()); + let diff = status.index_to_workdir().unwrap(); + assert_eq!(diff.old_file().path_bytes().unwrap(), b"foo"); + assert_eq!(diff.new_file().path_bytes().unwrap(), b"foo"); + } + + #[test] + fn filter() { + let (td, repo) = ::test::repo_init(); + t!(File::create(&td.path().join("foo"))); + t!(File::create(&td.path().join("bar"))); + let mut opts = StatusOptions::new(); + opts.include_untracked(true) + .pathspec("foo"); + + let statuses = t!(repo.statuses(Some(&mut opts))); + assert_eq!(statuses.iter().count(), 1); + let status = statuses.iter().next().unwrap(); + assert_eq!(status.path(), Some("foo")); + } + + #[test] + fn gitignore() { + let (td, repo) = ::test::repo_init(); + 
t!(t!(File::create(td.path().join(".gitignore"))).write_all(b"foo\n")); + assert!(!t!(repo.status_should_ignore(Path::new("bar")))); + assert!(t!(repo.status_should_ignore(Path::new("foo")))); + } + + #[test] + fn status_file() { + let (td, repo) = ::test::repo_init(); + assert!(repo.status_file(Path::new("foo")).is_err()); + t!(File::create(td.path().join("foo"))); + let status = t!(repo.status_file(Path::new("foo"))); + assert!(status.contains(::STATUS_WT_NEW)); + } +} diff -Nru cargo-0.17.0/vendor/git2-0.6.4/src/string_array.rs cargo-0.19.0/vendor/git2-0.6.4/src/string_array.rs --- cargo-0.17.0/vendor/git2-0.6.4/src/string_array.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/git2-0.6.4/src/string_array.rs 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,106 @@ +//! Bindings to libgit2's raw git_strarray type + +use std::str; +use std::ops::Range; + +use raw; +use util::Binding; + +/// A string array structure used by libgit2 +/// +/// Some apis return arrays of strings which originate from libgit2. This +/// wrapper type behaves a little like `Vec<&str>` but does so without copying +/// the underlying strings until necessary. +pub struct StringArray { + raw: raw::git_strarray, +} + +/// A forward iterator over the strings of an array, casted to `&str`. +pub struct Iter<'a> { + range: Range, + arr: &'a StringArray, +} + +/// A forward iterator over the strings of an array, casted to `&[u8]`. +pub struct IterBytes<'a> { + range: Range, + arr: &'a StringArray, +} + +impl StringArray { + /// Returns None if the i'th string is not utf8 or if i is out of bounds. + pub fn get(&self, i: usize) -> Option<&str> { + self.get_bytes(i).and_then(|s| str::from_utf8(s).ok()) + } + + /// Returns None if `i` is out of bounds. 
+ pub fn get_bytes(&self, i: usize) -> Option<&[u8]> { + if i < self.raw.count as usize { + unsafe { + let ptr = *self.raw.strings.offset(i as isize) as *const _; + Some(::opt_bytes(self, ptr).unwrap()) + } + } else { + None + } + } + + /// Returns an iterator over the strings contained within this array. + /// + /// The iterator yields `Option<&str>` as it is unknown whether the contents + /// are utf-8 or not. + pub fn iter(&self) -> Iter { + Iter { range: 0..self.len(), arr: self } + } + + /// Returns an iterator over the strings contained within this array, + /// yielding byte slices. + pub fn iter_bytes(&self) -> IterBytes { + IterBytes { range: 0..self.len(), arr: self } + } + + /// Returns the number of strings in this array. + pub fn len(&self) -> usize { self.raw.count as usize } +} + +impl Binding for StringArray { + type Raw = raw::git_strarray; + unsafe fn from_raw(raw: raw::git_strarray) -> StringArray { + StringArray { raw: raw } + } + fn raw(&self) -> raw::git_strarray { self.raw } +} + +impl<'a> Iterator for Iter<'a> { + type Item = Option<&'a str>; + fn next(&mut self) -> Option> { + self.range.next().map(|i| self.arr.get(i)) + } + fn size_hint(&self) -> (usize, Option) { self.range.size_hint() } +} +impl<'a> DoubleEndedIterator for Iter<'a> { + fn next_back(&mut self) -> Option> { + self.range.next_back().map(|i| self.arr.get(i)) + } +} +impl<'a> ExactSizeIterator for Iter<'a> {} + +impl<'a> Iterator for IterBytes<'a> { + type Item = &'a [u8]; + fn next(&mut self) -> Option<&'a [u8]> { + self.range.next().and_then(|i| self.arr.get_bytes(i)) + } + fn size_hint(&self) -> (usize, Option) { self.range.size_hint() } +} +impl<'a> DoubleEndedIterator for IterBytes<'a> { + fn next_back(&mut self) -> Option<&'a [u8]> { + self.range.next_back().and_then(|i| self.arr.get_bytes(i)) + } +} +impl<'a> ExactSizeIterator for IterBytes<'a> {} + +impl Drop for StringArray { + fn drop(&mut self) { + unsafe { raw::git_strarray_free(&mut self.raw) } + } +} diff -Nru 
cargo-0.17.0/vendor/git2-0.6.4/src/submodule.rs cargo-0.19.0/vendor/git2-0.6.4/src/submodule.rs --- cargo-0.17.0/vendor/git2-0.6.4/src/submodule.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/git2-0.6.4/src/submodule.rs 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,233 @@ +use std::marker; +use std::str; +use std::path::Path; + +use {raw, Oid, Repository, Error}; +use util::{self, Binding}; + +/// A structure to represent a git [submodule][1] +/// +/// [1]: http://git-scm.com/book/en/Git-Tools-Submodules +pub struct Submodule<'repo> { + raw: *mut raw::git_submodule, + _marker: marker::PhantomData<&'repo Repository>, +} + +impl<'repo> Submodule<'repo> { + /// Get the submodule's branch. + /// + /// Returns `None` if the branch is not valid utf-8 or if the branch is not + /// yet available. + pub fn branch(&self) -> Option<&str> { + self.branch_bytes().and_then(|s| str::from_utf8(s).ok()) + } + + /// Get the branch for the submodule. + /// + /// Returns `None` if the branch is not yet available. + pub fn branch_bytes(&self) -> Option<&[u8]> { + unsafe { + ::opt_bytes(self, raw::git_submodule_branch(self.raw)) + } + } + + /// Get the submodule's url. + /// + /// Returns `None` if the url is not valid utf-8 + pub fn url(&self) -> Option<&str> { str::from_utf8(self.url_bytes()).ok() } + + /// Get the url for the submodule. + pub fn url_bytes(&self) -> &[u8] { + unsafe { + ::opt_bytes(self, raw::git_submodule_url(self.raw)).unwrap() + } + } + + /// Get the submodule's name. + /// + /// Returns `None` if the name is not valid utf-8 + pub fn name(&self) -> Option<&str> { str::from_utf8(self.name_bytes()).ok() } + + /// Get the name for the submodule. + pub fn name_bytes(&self) -> &[u8] { + unsafe { + ::opt_bytes(self, raw::git_submodule_name(self.raw)).unwrap() + } + } + + /// Get the path for the submodule. 
+ pub fn path(&self) -> &Path { + util::bytes2path(unsafe { + ::opt_bytes(self, raw::git_submodule_path(self.raw)).unwrap() + }) + } + + /// Get the OID for the submodule in the current HEAD tree. + pub fn head_id(&self) -> Option { + unsafe { + Binding::from_raw_opt(raw::git_submodule_head_id(self.raw)) + } + } + + /// Get the OID for the submodule in the index. + pub fn index_id(&self) -> Option { + unsafe { + Binding::from_raw_opt(raw::git_submodule_index_id(self.raw)) + } + } + + /// Get the OID for the submodule in the current working directory. + /// + /// This returns the OID that corresponds to looking up 'HEAD' in the + /// checked out submodule. If there are pending changes in the index or + /// anything else, this won't notice that. + pub fn workdir_id(&self) -> Option { + unsafe { + Binding::from_raw_opt(raw::git_submodule_wd_id(self.raw)) + } + } + + /// Copy submodule info into ".git/config" file. + /// + /// Just like "git submodule init", this copies information about the + /// submodule into ".git/config". You can use the accessor functions above + /// to alter the in-memory git_submodule object and control what is written + /// to the config, overriding what is in .gitmodules. + /// + /// By default, existing entries will not be overwritten, but passing `true` + /// for `overwrite` forces them to be updated. + pub fn init(&mut self, overwrite: bool) -> Result<(), Error> { + unsafe { + try_call!(raw::git_submodule_init(self.raw, overwrite)); + } + Ok(()) + } + + /// Open the repository for a submodule. + /// + /// This will only work if the submodule is checked out into the working + /// directory. + pub fn open(&self) -> Result { + let mut raw = 0 as *mut raw::git_repository; + unsafe { + try_call!(raw::git_submodule_open(&mut raw, self.raw)); + Ok(Binding::from_raw(raw)) + } + } + + /// Reread submodule info from config, index, and HEAD. 
+ /// + /// Call this to reread cached submodule information for this submodule if + /// you have reason to believe that it has changed. + /// + /// If `force` is `true`, then data will be reloaded even if it doesn't seem + /// out of date + pub fn reload(&mut self, force: bool) -> Result<(), Error> { + unsafe { + try_call!(raw::git_submodule_reload(self.raw, force)); + } + Ok(()) + } + + /// Copy submodule remote info into submodule repo. + /// + /// This copies the information about the submodules URL into the checked + /// out submodule config, acting like "git submodule sync". This is useful + /// if you have altered the URL for the submodule (or it has been altered + /// by a fetch of upstream changes) and you need to update your local repo. + pub fn sync(&mut self) -> Result<(), Error> { + unsafe { try_call!(raw::git_submodule_sync(self.raw)); } + Ok(()) + } + + /// Add current submodule HEAD commit to index of superproject. + /// + /// If `write_index` is true, then the index file will be immediately + /// written. Otherwise you must explicitly call `write()` on an `Index` + /// later on. + pub fn add_to_index(&mut self, write_index: bool) -> Result<(), Error> { + unsafe { + try_call!(raw::git_submodule_add_to_index(self.raw, write_index)); + } + Ok(()) + } + + /// Resolve the setup of a new git submodule. + /// + /// This should be called on a submodule once you have called add setup and + /// done the clone of the submodule. This adds the .gitmodules file and the + /// newly cloned submodule to the index to be ready to be committed (but + /// doesn't actually do the commit). 
+ pub fn add_finalize(&mut self) -> Result<(), Error> { + unsafe { try_call!(raw::git_submodule_add_finalize(self.raw)); } + Ok(()) + } +} + +impl<'repo> Binding for Submodule<'repo> { + type Raw = *mut raw::git_submodule; + unsafe fn from_raw(raw: *mut raw::git_submodule) -> Submodule<'repo> { + Submodule { raw: raw, _marker: marker::PhantomData } + } + fn raw(&self) -> *mut raw::git_submodule { self.raw } +} + +impl<'repo> Drop for Submodule<'repo> { + fn drop(&mut self) { + unsafe { raw::git_submodule_free(self.raw) } + } +} + +#[cfg(test)] +mod tests { + use std::path::Path; + use std::fs; + use tempdir::TempDir; + use url::Url; + + use Repository; + + #[test] + fn smoke() { + let td = TempDir::new("test").unwrap(); + let repo = Repository::init(td.path()).unwrap(); + let mut s1 = repo.submodule("/path/to/nowhere", + Path::new("foo"), true).unwrap(); + s1.init(false).unwrap(); + s1.sync().unwrap(); + + let s2 = repo.submodule("/path/to/nowhere", + Path::new("bar"), true).unwrap(); + drop((s1, s2)); + + let mut submodules = repo.submodules().unwrap(); + assert_eq!(submodules.len(), 2); + let mut s = submodules.remove(0); + assert_eq!(s.name(), Some("bar")); + assert_eq!(s.url(), Some("/path/to/nowhere")); + assert_eq!(s.branch(), None); + assert!(s.head_id().is_none()); + assert!(s.index_id().is_none()); + assert!(s.workdir_id().is_none()); + + repo.find_submodule("bar").unwrap(); + s.open().unwrap(); + assert!(s.path() == Path::new("bar")); + s.reload(true).unwrap(); + } + + #[test] + fn add_a_submodule() { + let (_td, repo1) = ::test::repo_init(); + let (td, repo2) = ::test::repo_init(); + + let url = Url::from_file_path(&repo1.workdir().unwrap()).unwrap(); + let mut s = repo2.submodule(&url.to_string(), Path::new("bar"), + true).unwrap(); + t!(fs::remove_dir_all(td.path().join("bar"))); + t!(Repository::clone(&url.to_string(), + td.path().join("bar"))); + t!(s.add_to_index(false)); + t!(s.add_finalize()); + } +} diff -Nru 
cargo-0.17.0/vendor/git2-0.6.4/src/tag.rs cargo-0.19.0/vendor/git2-0.6.4/src/tag.rs --- cargo-0.17.0/vendor/git2-0.6.4/src/tag.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/git2-0.6.4/src/tag.rs 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,173 @@ +use std::marker; +use std::mem; +use std::str; + +use {raw, signature, Error, Oid, Object, Signature, ObjectType}; +use util::Binding; + +/// A structure to represent a git [tag][1] +/// +/// [1]: http://git-scm.com/book/en/Git-Basics-Tagging +pub struct Tag<'repo> { + raw: *mut raw::git_tag, + _marker: marker::PhantomData>, +} + +impl<'repo> Tag<'repo> { + /// Get the id (SHA1) of a repository tag + pub fn id(&self) -> Oid { + unsafe { Binding::from_raw(raw::git_tag_id(&*self.raw)) } + } + + /// Get the message of a tag + /// + /// Returns None if there is no message or if it is not valid utf8 + pub fn message(&self) -> Option<&str> { + self.message_bytes().and_then(|s| str::from_utf8(s).ok()) + } + + /// Get the message of a tag + /// + /// Returns None if there is no message + pub fn message_bytes(&self) -> Option<&[u8]> { + unsafe { ::opt_bytes(self, raw::git_tag_message(&*self.raw)) } + } + + /// Get the name of a tag + /// + /// Returns None if it is not valid utf8 + pub fn name(&self) -> Option<&str> { + str::from_utf8(self.name_bytes()).ok() + } + + /// Get the name of a tag + pub fn name_bytes(&self) -> &[u8] { + unsafe { ::opt_bytes(self, raw::git_tag_name(&*self.raw)).unwrap() } + } + + /// Recursively peel a tag until a non tag git_object is found + pub fn peel(&self) -> Result, Error> { + let mut ret = 0 as *mut raw::git_object; + unsafe { + try_call!(raw::git_tag_peel(&mut ret, &*self.raw)); + Ok(Binding::from_raw(ret)) + } + } + + /// Get the tagger (author) of a tag + /// + /// If the author is unspecified, then `None` is returned. 
+ pub fn tagger(&self) -> Option { + unsafe { + let ptr = raw::git_tag_tagger(&*self.raw); + if ptr.is_null() { + None + } else { + Some(signature::from_raw_const(self, ptr)) + } + } + } + + /// Get the tagged object of a tag + /// + /// This method performs a repository lookup for the given object and + /// returns it + pub fn target(&self) -> Result, Error> { + let mut ret = 0 as *mut raw::git_object; + unsafe { + try_call!(raw::git_tag_target(&mut ret, &*self.raw)); + Ok(Binding::from_raw(ret)) + } + } + + /// Get the OID of the tagged object of a tag + pub fn target_id(&self) -> Oid { + unsafe { Binding::from_raw(raw::git_tag_target_id(&*self.raw)) } + } + + /// Get the OID of the tagged object of a tag + pub fn target_type(&self) -> Option { + unsafe { ObjectType::from_raw(raw::git_tag_target_type(&*self.raw)) } + } + + /// Casts this Tag to be usable as an `Object` + pub fn as_object(&self) -> &Object<'repo> { + unsafe { + &*(self as *const _ as *const Object<'repo>) + } + } + + /// Consumes Tag to be returned as an `Object` + pub fn into_object(self) -> Object<'repo> { + assert_eq!(mem::size_of_val(&self), mem::size_of::()); + unsafe { + mem::transmute(self) + } + } +} + +impl<'repo> Binding for Tag<'repo> { + type Raw = *mut raw::git_tag; + unsafe fn from_raw(raw: *mut raw::git_tag) -> Tag<'repo> { + Tag { raw: raw, _marker: marker::PhantomData } + } + fn raw(&self) -> *mut raw::git_tag { self.raw } +} + +impl<'repo> Drop for Tag<'repo> { + fn drop(&mut self) { + unsafe { raw::git_tag_free(self.raw) } + } +} + +#[cfg(test)] +mod tests { + #[test] + fn smoke() { + let (_td, repo) = ::test::repo_init(); + let head = repo.head().unwrap(); + let id = head.target().unwrap(); + assert!(repo.find_tag(id).is_err()); + + let obj = repo.find_object(id, None).unwrap(); + let sig = repo.signature().unwrap(); + let tag_id = repo.tag("foo", &obj, &sig, "msg", false).unwrap(); + let tag = repo.find_tag(tag_id).unwrap(); + assert_eq!(tag.id(), tag_id); + + let tags = 
repo.tag_names(None).unwrap(); + assert_eq!(tags.len(), 1); + assert_eq!(tags.get(0), Some("foo")); + + assert_eq!(tag.name(), Some("foo")); + assert_eq!(tag.message(), Some("msg")); + assert_eq!(tag.peel().unwrap().id(), obj.id()); + assert_eq!(tag.target_id(), obj.id()); + assert_eq!(tag.target_type(), Some(::ObjectType::Commit)); + + assert_eq!(tag.tagger().unwrap().name(), sig.name()); + tag.target().unwrap(); + tag.into_object(); + + repo.find_object(tag_id, None).unwrap().as_tag().unwrap(); + repo.find_object(tag_id, None).unwrap().into_tag().ok().unwrap(); + + repo.tag_delete("foo").unwrap(); + } + + #[test] + fn lite() { + let (_td, repo) = ::test::repo_init(); + let head = t!(repo.head()); + let id = head.target().unwrap(); + let obj = t!(repo.find_object(id, None)); + let tag_id = t!(repo.tag_lightweight("foo", &obj, false)); + assert!(repo.find_tag(tag_id).is_err()); + assert_eq!(t!(repo.refname_to_id("refs/tags/foo")), id); + + let tags = t!(repo.tag_names(Some("f*"))); + assert_eq!(tags.len(), 1); + let tags = t!(repo.tag_names(Some("b*"))); + assert_eq!(tags.len(), 0); + } +} diff -Nru cargo-0.17.0/vendor/git2-0.6.4/src/test.rs cargo-0.19.0/vendor/git2-0.6.4/src/test.rs --- cargo-0.17.0/vendor/git2-0.6.4/src/test.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/git2-0.6.4/src/test.rs 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,59 @@ +use std::path::{Path, PathBuf}; +use std::io; +use tempdir::TempDir; +use url::Url; + +use Repository; + +macro_rules! 
t { + ($e:expr) => (match $e { + Ok(e) => e, + Err(e) => panic!("{} failed with {}", stringify!($e), e), + }) +} + +pub fn repo_init() -> (TempDir, Repository) { + let td = TempDir::new("test").unwrap(); + let repo = Repository::init(td.path()).unwrap(); + { + let mut config = repo.config().unwrap(); + config.set_str("user.name", "name").unwrap(); + config.set_str("user.email", "email").unwrap(); + let mut index = repo.index().unwrap(); + let id = index.write_tree().unwrap(); + + let tree = repo.find_tree(id).unwrap(); + let sig = repo.signature().unwrap(); + repo.commit(Some("HEAD"), &sig, &sig, "initial", + &tree, &[]).unwrap(); + } + (td, repo) +} + +pub fn path2url(path: &Path) -> String { + Url::from_file_path(path).unwrap().to_string() +} + +#[cfg(windows)] +pub fn realpath(original: &Path) -> io::Result { + Ok(original.to_path_buf()) +} +#[cfg(unix)] +pub fn realpath(original: &Path) -> io::Result { + use std::ffi::{CStr, OsString, CString}; + use std::os::unix::prelude::*; + use libc::{self, c_char}; + extern { + fn realpath(name: *const c_char, resolved: *mut c_char) -> *mut c_char; + } + unsafe { + let cstr = try!(CString::new(original.as_os_str().as_bytes())); + let ptr = realpath(cstr.as_ptr(), 0 as *mut _); + if ptr.is_null() { + return Err(io::Error::last_os_error()) + } + let bytes = CStr::from_ptr(ptr).to_bytes().to_vec(); + libc::free(ptr as *mut _); + Ok(PathBuf::from(OsString::from_vec(bytes))) + } +} diff -Nru cargo-0.17.0/vendor/git2-0.6.4/src/time.rs cargo-0.19.0/vendor/git2-0.6.4/src/time.rs --- cargo-0.17.0/vendor/git2-0.6.4/src/time.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/git2-0.6.4/src/time.rs 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,95 @@ +use std::cmp::Ordering; + +use libc::c_int; + +use raw; +use util::Binding; + +/// Time in a signature +#[derive(Copy, Clone, Eq, PartialEq)] +pub struct Time { + raw: raw::git_time, +} + +/// Time structure used in a git index entry. 
+#[derive(Copy, Clone, Eq, PartialEq)] +pub struct IndexTime { + raw: raw::git_index_time, +} + +impl Time { + /// Creates a new time structure from its components. + pub fn new(time: i64, offset: i32) -> Time { + unsafe { + Binding::from_raw(raw::git_time { + time: time as raw::git_time_t, + offset: offset as c_int, + }) + } + } + + /// Return the time, in seconds, from epoch + pub fn seconds(&self) -> i64 { self.raw.time as i64 } + + /// Return the timezone offset, in minutes + pub fn offset_minutes(&self) -> i32 { self.raw.offset as i32 } +} + +impl PartialOrd for Time { + fn partial_cmp(&self, other: &Time) -> Option { + Some(self.cmp(other)) + } +} + +impl Ord for Time { + fn cmp(&self, other: &Time) -> Ordering { + (self.raw.time, self.raw.offset).cmp(&(other.raw.time, other.raw.offset)) + } +} + +impl Binding for Time { + type Raw = raw::git_time; + unsafe fn from_raw(raw: raw::git_time) -> Time { + Time { raw: raw } + } + fn raw(&self) -> raw::git_time { self.raw } +} + +impl IndexTime { + /// Creates a new time structure from its components. + pub fn new(seconds: i32, nanoseconds: u32) -> IndexTime { + unsafe { + Binding::from_raw(raw::git_index_time { + seconds: seconds, + nanoseconds: nanoseconds, + }) + } + } + + /// Returns the number of seconds in the second component of this time. + pub fn seconds(&self) -> i32 { self.raw.seconds } + /// Returns the nanosecond component of this time. 
+ pub fn nanoseconds(&self) -> u32 { self.raw.nanoseconds } +} + +impl Binding for IndexTime { + type Raw = raw::git_index_time; + unsafe fn from_raw(raw: raw::git_index_time) -> IndexTime { + IndexTime { raw: raw } + } + fn raw(&self) -> raw::git_index_time { self.raw } +} + +impl PartialOrd for IndexTime { + fn partial_cmp(&self, other: &IndexTime) -> Option { + Some(self.cmp(other)) + } +} + +impl Ord for IndexTime { + fn cmp(&self, other: &IndexTime) -> Ordering { + let me = (self.raw.seconds, self.raw.nanoseconds); + let other = (other.raw.seconds, other.raw.nanoseconds); + me.cmp(&other) + } +} diff -Nru cargo-0.17.0/vendor/git2-0.6.4/src/transport.rs cargo-0.19.0/vendor/git2-0.6.4/src/transport.rs --- cargo-0.17.0/vendor/git2-0.6.4/src/transport.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/git2-0.6.4/src/transport.rs 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,325 @@ +//! Interfaces for adding custom transports to libgit2 + +use std::ffi::{CStr, CString}; +use std::io::prelude::*; +use std::io; +use std::mem; +use std::slice; +use std::str; +use libc::{c_int, c_void, c_uint, c_char, size_t}; + +use {raw, panic, Error, Remote}; +use util::Binding; + +/// A transport is a structure which knows how to transfer data to and from a +/// remote. +/// +/// This transport is a representation of the raw transport underneath it, which +/// is similar to a trait object in Rust. +#[allow(missing_copy_implementations)] +pub struct Transport { + raw: *mut raw::git_transport, + owned: bool, +} + +/// Interfaced used by smart transports. +/// +/// The full-fledged definiton of transports has to deal with lots of +/// nitty-gritty details of the git protocol, but "smart transports" largely +/// only need to deal with read() and write() of data over a channel. +/// +/// A smart subtransport is contained within an instance of a smart transport +/// and is delegated to in order to actually conduct network activity to push or +/// pull data from a remote. 
+pub trait SmartSubtransport: Send + 'static { + /// Indicates that this subtransport will be performing the specified action + /// on the specified URL. + /// + /// This function is responsible for making any network connections and + /// returns a stream which can be read and written from in order to + /// negotiate the git protocol. + fn action(&self, url: &str, action: Service) + -> Result, Error>; + + /// Terminates a connection with the remote. + /// + /// Each subtransport is guaranteed a call to close() between calls to + /// action(), except for the following tow natural progressions of actions + /// against a constant URL. + /// + /// 1. UploadPackLs -> UploadPack + /// 2. ReceivePackLs -> ReceivePack + fn close(&self) -> Result<(), Error>; +} + +/// Actions that a smart transport can ask a subtransport to perform +#[derive(Copy, Clone)] +#[allow(missing_docs)] +pub enum Service { + UploadPackLs, + UploadPack, + ReceivePackLs, + ReceivePack, +} + +/// An instance of a stream over which a smart transport will communicate with a +/// remote. +/// +/// Currently this only requires the standard `Read` and `Write` traits. This +/// trait also does not need to be implemented manually as long as the `Read` +/// and `Write` traits are implemented. +pub trait SmartSubtransportStream: Read + Write + Send + 'static {} + +impl SmartSubtransportStream for T {} + +type TransportFactory = Fn(&Remote) -> Result + Send + Sync + + 'static; + +/// Boxed data payload used for registering new transports. +/// +/// Currently only contains a field which knows how to create transports. +struct TransportData { + factory: Box, +} + +/// Instance of a `git_smart_subtransport`, must use `#[repr(C)]` to ensure that +/// the C fields come first. +#[repr(C)] +struct RawSmartSubtransport { + raw: raw::git_smart_subtransport, + obj: Box, +} + +/// Instance of a `git_smart_subtransport_stream`, must use `#[repr(C)]` to +/// ensure that the C fields come first. 
+#[repr(C)] +struct RawSmartSubtransportStream { + raw: raw::git_smart_subtransport_stream, + obj: Box, +} + +/// Add a custom transport definition, to be used in addition to the built-in +/// set of transports that come with libgit2. +/// +/// This function is unsafe as it needs to be externally synchronized with calls +/// to creation of other transports. +pub unsafe fn register(prefix: &str, factory: F) -> Result<(), Error> + where F: Fn(&Remote) -> Result + Send + Sync + 'static +{ + let mut data = Box::new(TransportData { + factory: Box::new(factory), + }); + let prefix = try!(CString::new(prefix)); + let datap = (&mut *data) as *mut TransportData as *mut c_void; + try_call!(raw::git_transport_register(prefix, + transport_factory, + datap)); + mem::forget(data); + Ok(()) +} + +impl Transport { + /// Creates a new transport which will use the "smart" transport protocol + /// for transferring data. + /// + /// A smart transport requires a *subtransport* over which data is actually + /// communicated, but this subtransport largely just needs to be able to + /// read() and write(). The subtransport provided will be used to make + /// connections which can then be read/written from. + /// + /// The `rpc` argument is `true` if the protocol is stateless, false + /// otherwise. For example `http://` is stateless but `git://` is not. 
+ pub fn smart(remote: &Remote, + rpc: bool, + subtransport: S) -> Result + where S: SmartSubtransport + { + let mut ret = 0 as *mut _; + + let mut raw = Box::new(RawSmartSubtransport { + raw: raw::git_smart_subtransport { + action: subtransport_action, + close: subtransport_close, + free: subtransport_free, + }, + obj: Box::new(subtransport), + }); + let mut defn = raw::git_smart_subtransport_definition { + callback: smart_factory, + rpc: rpc as c_uint, + param: &mut *raw as *mut _ as *mut _, + }; + + // Currently there's no way to pass a paload via the + // git_smart_subtransport_definition structure, but it's only used as a + // configuration for the initial creation of the smart transport (verified + // by reading the current code, hopefully it doesn't change!). + // + // We, however, need some state (gotta pass in our + // `RawSmartSubtransport`). This also means that this block must be + // entirely synchronized with a lock (boo!) + unsafe { + try_call!(raw::git_transport_smart(&mut ret, remote.raw(), + &mut defn as *mut _ as *mut _)); + mem::forget(raw); // ownership transport to `ret` + } + return Ok(Transport { raw: ret, owned: true }); + + extern fn smart_factory(out: *mut *mut raw::git_smart_subtransport, + _owner: *mut raw::git_transport, + ptr: *mut c_void) -> c_int { + unsafe { + *out = ptr as *mut raw::git_smart_subtransport; + 0 + } + } + } +} + +impl Drop for Transport { + fn drop(&mut self) { + if self.owned { + unsafe { + ((*self.raw).free)(self.raw) + } + } + } +} + +// callback used by register() to create new transports +extern fn transport_factory(out: *mut *mut raw::git_transport, + owner: *mut raw::git_remote, + param: *mut c_void) -> c_int { + struct Bomb<'a> { remote: Option> } + impl<'a> Drop for Bomb<'a> { + fn drop(&mut self) { + // TODO: maybe a method instead? 
+ mem::forget(self.remote.take()); + } + } + + panic::wrap(|| unsafe { + let remote = Bomb { remote: Some(Binding::from_raw(owner)) }; + let data = &mut *(param as *mut TransportData); + match (data.factory)(remote.remote.as_ref().unwrap()) { + Ok(mut transport) => { + *out = transport.raw; + transport.owned = false; + 0 + } + Err(e) => e.raw_code() as c_int, + } + }).unwrap_or(-1) +} + +// callback used by smart transports to delegate an action to a +// `SmartSubtransport` trait object. +extern fn subtransport_action(stream: *mut *mut raw::git_smart_subtransport_stream, + raw_transport: *mut raw::git_smart_subtransport, + url: *const c_char, + action: raw::git_smart_service_t) -> c_int { + panic::wrap(|| unsafe { + let url = CStr::from_ptr(url).to_bytes(); + let url = match str::from_utf8(url).ok() { + Some(s) => s, + None => return -1, + }; + let action = match action { + raw::GIT_SERVICE_UPLOADPACK_LS => Service::UploadPackLs, + raw::GIT_SERVICE_UPLOADPACK => Service::UploadPack, + raw::GIT_SERVICE_RECEIVEPACK_LS => Service::ReceivePackLs, + raw::GIT_SERVICE_RECEIVEPACK => Service::ReceivePack, + n => panic!("unknown action: {}", n), + }; + let transport = &mut *(raw_transport as *mut RawSmartSubtransport); + let obj = match transport.obj.action(url, action) { + Ok(s) => s, + Err(e) => return e.raw_code() as c_int, + }; + *stream = mem::transmute(Box::new(RawSmartSubtransportStream { + raw: raw::git_smart_subtransport_stream { + subtransport: raw_transport, + read: stream_read, + write: stream_write, + free: stream_free, + }, + obj: obj, + })); + 0 + }).unwrap_or(-1) +} + +// callback used by smart transports to close a `SmartSubtransport` trait +// object. 
+extern fn subtransport_close(transport: *mut raw::git_smart_subtransport) + -> c_int { + let ret = panic::wrap(|| unsafe { + let transport = &mut *(transport as *mut RawSmartSubtransport); + transport.obj.close() + }); + match ret { + Some(Ok(())) => 0, + Some(Err(e)) => e.raw_code() as c_int, + None => -1, + } +} + +// callback used by smart transports to free a `SmartSubtransport` trait +// object. +extern fn subtransport_free(transport: *mut raw::git_smart_subtransport) { + let _ = panic::wrap(|| unsafe { + mem::transmute::<_, Box>(transport); + }); +} + +// callback used by smart transports to read from a `SmartSubtransportStream` +// object. +extern fn stream_read(stream: *mut raw::git_smart_subtransport_stream, + buffer: *mut c_char, + buf_size: size_t, + bytes_read: *mut size_t) -> c_int { + let ret = panic::wrap(|| unsafe { + let transport = &mut *(stream as *mut RawSmartSubtransportStream); + let buf = slice::from_raw_parts_mut(buffer as *mut u8, + buf_size as usize); + match transport.obj.read(buf) { + Ok(n) => { *bytes_read = n as size_t; Ok(n) } + e => e, + } + }); + match ret { + Some(Ok(_)) => 0, + Some(Err(e)) => unsafe { set_err(e); -2 }, + None => -1, + } +} + +// callback used by smart transports to write to a `SmartSubtransportStream` +// object. +extern fn stream_write(stream: *mut raw::git_smart_subtransport_stream, + buffer: *const c_char, + len: size_t) -> c_int { + let ret = panic::wrap(|| unsafe { + let transport = &mut *(stream as *mut RawSmartSubtransportStream); + let buf = slice::from_raw_parts(buffer as *const u8, len as usize); + transport.obj.write_all(buf) + }); + match ret { + Some(Ok(())) => 0, + Some(Err(e)) => unsafe { set_err(e); -2 }, + None => -1, + } +} + +unsafe fn set_err(e: io::Error) { + let s = CString::new(e.to_string()).unwrap(); + raw::giterr_set_str(raw::GITERR_NET as c_int, s.as_ptr()) +} + +// callback used by smart transports to free a `SmartSubtransportStream` +// object. 
+extern fn stream_free(stream: *mut raw::git_smart_subtransport_stream) { + let _ = panic::wrap(|| unsafe { + mem::transmute::<_, Box>(stream); + }); +} diff -Nru cargo-0.17.0/vendor/git2-0.6.4/src/treebuilder.rs cargo-0.19.0/vendor/git2-0.6.4/src/treebuilder.rs --- cargo-0.17.0/vendor/git2-0.6.4/src/treebuilder.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/git2-0.6.4/src/treebuilder.rs 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,193 @@ +use std::marker; + +use libc::{c_int, c_void}; + +use {panic, raw, tree, Error, Oid, Repository, TreeEntry}; +use util::{Binding, IntoCString}; + +/// Constructor for in-memory trees +pub struct TreeBuilder<'repo> { + raw: *mut raw::git_treebuilder, + _marker: marker::PhantomData<&'repo Repository>, +} + +impl<'repo> TreeBuilder<'repo> { + /// Clear all the entries in the builder + pub fn clear(&mut self) { + unsafe { raw::git_treebuilder_clear(self.raw) } + } + + /// Get the number of entries + pub fn len(&self) -> usize { + unsafe { raw::git_treebuilder_entrycount(self.raw) as usize } + } + + /// Get en entry from the builder from its filename + pub fn get

(&self, filename: P) -> Result, Error> + where P: IntoCString + { + let filename = try!(filename.into_c_string()); + unsafe { + let ret = raw::git_treebuilder_get(self.raw, filename.as_ptr()); + if ret.is_null() { + Ok(None) + } else { + Ok(Some(tree::entry_from_raw_const(ret))) + } + } + } + + /// Add or update an entry in the builder + /// + /// No attempt is made to ensure that the provided Oid points to + /// an object of a reasonable type (or any object at all). + /// + /// The mode given must be one of 0o040000, 0o100644, 0o100755, 0o120000 or + /// 0o160000 currently. + pub fn insert(&mut self, filename: P, oid: Oid, + filemode: i32) -> Result { + let filename = try!(filename.into_c_string()); + let filemode = filemode as raw::git_filemode_t; + + let mut ret = 0 as *const raw::git_tree_entry; + unsafe { + try_call!(raw::git_treebuilder_insert(&mut ret, self.raw, filename, + oid.raw(), filemode)); + Ok(tree::entry_from_raw_const(ret)) + } + } + + /// Remove an entry from the builder by its filename + pub fn remove(&mut self, filename: P) -> Result<(), Error> { + let filename = try!(filename.into_c_string()); + unsafe { + try_call!(raw::git_treebuilder_remove(self.raw, filename)); + } + Ok(()) + } + + /// Selectively remove entries from the tree + /// + /// Values for which the filter returns `true` will be kept. Note + /// that this behavior is different from the libgit2 C interface. 
+ pub fn filter(&mut self, mut filter: F) + where F: FnMut(&TreeEntry) -> bool + { + let mut cb: &mut FilterCb = &mut filter; + let ptr = &mut cb as *mut _; + unsafe { + raw::git_treebuilder_filter(self.raw, filter_cb, ptr as *mut _); + panic::check(); + } + } + + /// Write the contents of the TreeBuilder as a Tree object and + /// return its Oid + pub fn write(&self) -> Result { + let mut raw = raw::git_oid { id: [0; raw::GIT_OID_RAWSZ] }; + unsafe { + try_call!(raw::git_treebuilder_write(&mut raw, self.raw())); + Ok(Binding::from_raw(&raw as *const _)) + } + } +} + +type FilterCb<'a> = FnMut(&TreeEntry) -> bool + 'a; + +extern fn filter_cb(entry: *const raw::git_tree_entry, + payload: *mut c_void) -> c_int { + let ret = panic::wrap(|| unsafe { + // There's no way to return early from git_treebuilder_filter. + if panic::panicked() { + true + } else { + let entry = tree::entry_from_raw_const(entry); + let payload = payload as *mut &mut FilterCb; + (*payload)(&entry) + } + }); + if ret == Some(false) {1} else {0} +} + +impl<'repo> Binding for TreeBuilder<'repo> { + type Raw = *mut raw::git_treebuilder; + + unsafe fn from_raw(raw: *mut raw::git_treebuilder) -> TreeBuilder<'repo> { + TreeBuilder { raw: raw, _marker: marker::PhantomData } + } + fn raw(&self) -> *mut raw::git_treebuilder { self.raw } +} + +impl<'repo> Drop for TreeBuilder<'repo> { + fn drop(&mut self) { + unsafe { raw::git_treebuilder_free(self.raw) } + } +} + +#[cfg(test)] +mod tests { + use ObjectType; + + #[test] + fn smoke() { + let (_td, repo) = ::test::repo_init(); + + let mut builder = repo.treebuilder(None).unwrap(); + assert_eq!(builder.len(), 0); + let blob = repo.blob(b"data").unwrap(); + { + let entry = builder.insert("a", blob, 0o100644).unwrap(); + assert_eq!(entry.kind(), Some(ObjectType::Blob)); + } + builder.insert("b", blob, 0o100644).unwrap(); + assert_eq!(builder.len(), 2); + builder.remove("a").unwrap(); + assert_eq!(builder.len(), 1); + 
assert_eq!(builder.get("b").unwrap().unwrap().id(), blob); + builder.clear(); + assert_eq!(builder.len(), 0); + } + + #[test] + fn write() { + let (_td, repo) = ::test::repo_init(); + + let mut builder = repo.treebuilder(None).unwrap(); + let data = repo.blob(b"data").unwrap(); + builder.insert("name", data, 0o100644).unwrap(); + let tree = builder.write().unwrap(); + let tree = repo.find_tree(tree).unwrap(); + let entry = tree.get(0).unwrap(); + assert_eq!(entry.name(), Some("name")); + let blob = entry.to_object(&repo).unwrap(); + let blob = blob.as_blob().unwrap(); + assert_eq!(blob.content(), b"data"); + + let builder = repo.treebuilder(Some(&tree)).unwrap(); + assert_eq!(builder.len(), 1); + } + + #[test] + fn filter() { + let (_td, repo) = ::test::repo_init(); + + let mut builder = repo.treebuilder(None).unwrap(); + let blob = repo.blob(b"data").unwrap(); + let tree = { + let head = repo.head().unwrap() + .peel(ObjectType::Commit).unwrap(); + let head = head.as_commit().unwrap(); + head.tree_id() + }; + builder.insert("blob", blob, 0o100644).unwrap(); + builder.insert("dir", tree, 0o040000).unwrap(); + builder.insert("dir2", tree, 0o040000).unwrap(); + + builder.filter(|_| true); + assert_eq!(builder.len(), 3); + builder.filter(|e| e.kind().unwrap() != ObjectType::Blob); + assert_eq!(builder.len(), 2); + builder.filter(|_| false); + assert_eq!(builder.len(), 0); + } +} diff -Nru cargo-0.17.0/vendor/git2-0.6.4/src/tree.rs cargo-0.19.0/vendor/git2-0.6.4/src/tree.rs --- cargo-0.17.0/vendor/git2-0.6.4/src/tree.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/git2-0.6.4/src/tree.rs 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,388 @@ +use std::mem; +use std::cmp::Ordering; +use std::ffi::CString; +use std::ops::Range; +use std::marker; +use std::path::Path; +use std::str; +use libc; + +use {raw, Oid, Repository, Error, Object, ObjectType}; +use util::{Binding, IntoCString}; + +/// A structure to represent a git [tree][1] +/// +/// [1]: 
http://git-scm.com/book/en/Git-Internals-Git-Objects +pub struct Tree<'repo> { + raw: *mut raw::git_tree, + _marker: marker::PhantomData>, +} + +/// A structure representing an entry inside of a tree. An entry is borrowed +/// from a tree. +pub struct TreeEntry<'tree> { + raw: *mut raw::git_tree_entry, + owned: bool, + _marker: marker::PhantomData<&'tree raw::git_tree_entry>, +} + +/// An iterator over the entries in a tree. +pub struct TreeIter<'tree> { + range: Range, + tree: &'tree Tree<'tree>, +} + +impl<'repo> Tree<'repo> { + /// Get the id (SHA1) of a repository object + pub fn id(&self) -> Oid { + unsafe { Binding::from_raw(raw::git_tree_id(&*self.raw)) } + } + + /// Get the number of entries listed in this tree. + pub fn len(&self) -> usize { + unsafe { raw::git_tree_entrycount(&*self.raw) as usize } + } + + /// Returns an iterator over the entries in this tree. + pub fn iter(&self) -> TreeIter { + TreeIter { range: 0..self.len(), tree: self } + } + + /// Lookup a tree entry by SHA value. + pub fn get_id(&self, id: Oid) -> Option { + unsafe { + let ptr = raw::git_tree_entry_byid(&*self.raw(), &*id.raw()); + if ptr.is_null() { + None + } else { + Some(entry_from_raw_const(ptr)) + } + } + } + + /// Lookup a tree entry by its position in the tree + pub fn get(&self, n: usize) -> Option { + unsafe { + let ptr = raw::git_tree_entry_byindex(&*self.raw(), + n as libc::size_t); + if ptr.is_null() { + None + } else { + Some(entry_from_raw_const(ptr)) + } + } + } + + /// Lookup a tree entry by its filename + pub fn get_name(&self, filename: &str) -> Option { + let filename = CString::new(filename).unwrap(); + unsafe { + let ptr = call!(raw::git_tree_entry_byname(&*self.raw(), filename)); + if ptr.is_null() { + None + } else { + Some(entry_from_raw_const(ptr)) + } + } + } + + /// Retrieve a tree entry contained in a tree or in any of its subtrees, + /// given its relative path. 
+ pub fn get_path(&self, path: &Path) -> Result, Error> { + let path = try!(path.into_c_string()); + let mut ret = 0 as *mut raw::git_tree_entry; + unsafe { + try_call!(raw::git_tree_entry_bypath(&mut ret, &*self.raw(), path)); + Ok(Binding::from_raw(ret)) + } + } + + /// Casts this Tree to be usable as an `Object` + pub fn as_object(&self) -> &Object<'repo> { + unsafe { + &*(self as *const _ as *const Object<'repo>) + } + } + + /// Consumes Commit to be returned as an `Object` + pub fn into_object(self) -> Object<'repo> { + assert_eq!(mem::size_of_val(&self), mem::size_of::()); + unsafe { + mem::transmute(self) + } + } +} + +impl<'repo> Binding for Tree<'repo> { + type Raw = *mut raw::git_tree; + + unsafe fn from_raw(raw: *mut raw::git_tree) -> Tree<'repo> { + Tree { raw: raw, _marker: marker::PhantomData } + } + fn raw(&self) -> *mut raw::git_tree { self.raw } +} + +impl<'repo> Drop for Tree<'repo> { + fn drop(&mut self) { + unsafe { raw::git_tree_free(self.raw) } + } +} + +impl<'repo, 'iter> IntoIterator for &'iter Tree<'repo> { + type Item = TreeEntry<'iter>; + type IntoIter = TreeIter<'iter>; + fn into_iter(self) -> Self::IntoIter { + self.iter() + } +} + +/// Create a new tree entry from the raw pointer provided. +/// +/// The lifetime of the entry is tied to the tree provided and the function +/// is unsafe because the validity of the pointer cannot be guaranteed. 
+pub unsafe fn entry_from_raw_const<'tree>(raw: *const raw::git_tree_entry) + -> TreeEntry<'tree> { + TreeEntry { + raw: raw as *mut raw::git_tree_entry, + owned: false, + _marker: marker::PhantomData, + } +} + +impl<'tree> TreeEntry<'tree> { + /// Get the id of the object pointed by the entry + pub fn id(&self) -> Oid { + unsafe { Binding::from_raw(raw::git_tree_entry_id(&*self.raw)) } + } + + /// Get the filename of a tree entry + /// + /// Returns `None` if the name is not valid utf-8 + pub fn name(&self) -> Option<&str> { + str::from_utf8(self.name_bytes()).ok() + } + + /// Get the filename of a tree entry + pub fn name_bytes(&self) -> &[u8] { + unsafe { + ::opt_bytes(self, raw::git_tree_entry_name(&*self.raw())).unwrap() + } + } + + /// Convert a tree entry to the object it points to. + pub fn to_object<'a>(&self, repo: &'a Repository) + -> Result, Error> { + let mut ret = 0 as *mut raw::git_object; + unsafe { + try_call!(raw::git_tree_entry_to_object(&mut ret, repo.raw(), + &*self.raw())); + Ok(Binding::from_raw(ret)) + } + } + + /// Get the type of the object pointed by the entry + pub fn kind(&self) -> Option { + ObjectType::from_raw(unsafe { raw::git_tree_entry_type(&*self.raw) }) + } + + /// Get the UNIX file attributes of a tree entry + pub fn filemode(&self) -> i32 { + unsafe { raw::git_tree_entry_filemode(&*self.raw) as i32 } + } + + /// Get the raw UNIX file attributes of a tree entry + pub fn filemode_raw(&self) -> i32 { + unsafe { raw::git_tree_entry_filemode_raw(&*self.raw) as i32 } + } + + /// Convert this entry of any lifetime into an owned signature with a static + /// lifetime. + /// + /// This will use the `Clone::clone` implementation under the hood. 
+ pub fn to_owned(&self) -> TreeEntry<'static> { + unsafe { + let me = mem::transmute::<&TreeEntry<'tree>, &TreeEntry<'static>>(self); + me.clone() + } + } +} + +impl<'a> Binding for TreeEntry<'a> { + type Raw = *mut raw::git_tree_entry; + unsafe fn from_raw(raw: *mut raw::git_tree_entry) -> TreeEntry<'a> { + TreeEntry { + raw: raw, + owned: true, + _marker: marker::PhantomData, + } + } + fn raw(&self) -> *mut raw::git_tree_entry { self.raw } +} + +impl<'a> Clone for TreeEntry<'a> { + fn clone(&self) -> TreeEntry<'a> { + let mut ret = 0 as *mut raw::git_tree_entry; + unsafe { + assert_eq!(raw::git_tree_entry_dup(&mut ret, &*self.raw()), 0); + Binding::from_raw(ret) + } + } +} + +impl<'a> PartialOrd for TreeEntry<'a> { + fn partial_cmp(&self, other: &TreeEntry<'a>) -> Option { + Some(self.cmp(other)) + } +} +impl<'a> Ord for TreeEntry<'a> { + fn cmp(&self, other: &TreeEntry<'a>) -> Ordering { + match unsafe { raw::git_tree_entry_cmp(&*self.raw(), &*other.raw()) } { + 0 => Ordering::Equal, + n if n < 0 => Ordering::Less, + _ => Ordering::Greater, + } + } +} + +impl<'a> PartialEq for TreeEntry<'a> { + fn eq(&self, other: &TreeEntry<'a>) -> bool { + self.cmp(other) == Ordering::Equal + } +} +impl<'a> Eq for TreeEntry<'a> {} + +impl<'a> Drop for TreeEntry<'a> { + fn drop(&mut self) { + if self.owned { + unsafe { raw::git_tree_entry_free(self.raw) } + } + } +} + +impl<'tree> Iterator for TreeIter<'tree> { + type Item = TreeEntry<'tree>; + fn next(&mut self) -> Option> { + self.range.next().and_then(|i| self.tree.get(i)) + } + fn size_hint(&self) -> (usize, Option) { self.range.size_hint() } +} +impl<'tree> DoubleEndedIterator for TreeIter<'tree> { + fn next_back(&mut self) -> Option> { + self.range.next_back().and_then(|i| self.tree.get(i)) + } +} +impl<'tree> ExactSizeIterator for TreeIter<'tree> {} + +#[cfg(test)] +mod tests { + use {Repository,Tree,TreeEntry,ObjectType,Object}; + use tempdir::TempDir; + use std::fs::File; + use std::io::prelude::*; + use 
std::path::Path; + + pub struct TestTreeIter<'a> { + entries: Vec>, + repo: &'a Repository, + } + + impl<'a> Iterator for TestTreeIter<'a> { + type Item = TreeEntry<'a>; + + fn next(&mut self) -> Option > { + if self.entries.is_empty() { + None + } else { + let entry = self.entries.remove(0); + + match entry.kind() { + Some(ObjectType::Tree) => { + let obj: Object<'a> = entry.to_object(self.repo).unwrap(); + + let tree: &Tree<'a> = obj.as_tree().unwrap(); + + for entry in tree.iter() { + self.entries.push(entry.to_owned()); + } + } + _ => {} + } + + Some(entry) + } + } + } + + fn tree_iter<'repo>(tree: &Tree<'repo>, repo: &'repo Repository) + -> TestTreeIter<'repo> { + let mut initial = vec![]; + + for entry in tree.iter() { + initial.push(entry.to_owned()); + } + + TestTreeIter { + entries: initial, + repo: repo, + } + } + + #[test] + fn smoke_tree_iter() { + let (td, repo) = ::test::repo_init(); + + setup_repo(&td, &repo); + + let head = repo.head().unwrap(); + let target = head.target().unwrap(); + let commit = repo.find_commit(target).unwrap(); + + let tree = repo.find_tree(commit.tree_id()).unwrap(); + assert_eq!(tree.id(), commit.tree_id()); + assert_eq!(tree.len(), 1); + + for entry in tree_iter(&tree, &repo) { + println!("iter entry {:?}", entry.name()); + } + } + + fn setup_repo(td: &TempDir, repo: &Repository) { + let mut index = repo.index().unwrap(); + File::create(&td.path().join("foo")).unwrap().write_all(b"foo").unwrap(); + index.add_path(Path::new("foo")).unwrap(); + let id = index.write_tree().unwrap(); + let sig = repo.signature().unwrap(); + let tree = repo.find_tree(id).unwrap(); + let parent = repo.find_commit(repo.head().unwrap().target() + .unwrap()).unwrap(); + repo.commit(Some("HEAD"), &sig, &sig, "another commit", + &tree, &[&parent]).unwrap(); + } + + #[test] + fn smoke() { + let (td, repo) = ::test::repo_init(); + + setup_repo(&td, &repo); + + let head = repo.head().unwrap(); + let target = head.target().unwrap(); + let commit = 
repo.find_commit(target).unwrap(); + + let tree = repo.find_tree(commit.tree_id()).unwrap(); + assert_eq!(tree.id(), commit.tree_id()); + assert_eq!(tree.len(), 1); + { + let e1 = tree.get(0).unwrap(); + assert!(e1 == tree.get_id(e1.id()).unwrap()); + assert!(e1 == tree.get_name("foo").unwrap()); + assert!(e1 == tree.get_path(Path::new("foo")).unwrap()); + assert_eq!(e1.name(), Some("foo")); + e1.to_object(&repo).unwrap(); + } + tree.into_object(); + + repo.find_object(commit.tree_id(), None).unwrap().as_tree().unwrap(); + repo.find_object(commit.tree_id(), None).unwrap().into_tree().ok().unwrap(); + } +} diff -Nru cargo-0.17.0/vendor/git2-0.6.4/src/util.rs cargo-0.19.0/vendor/git2-0.6.4/src/util.rs --- cargo-0.17.0/vendor/git2-0.6.4/src/util.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/git2-0.6.4/src/util.rs 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,152 @@ +use std::ffi::{CString, OsStr, OsString}; +use std::iter::IntoIterator; +use std::path::{Path, PathBuf}; +use libc::{c_char, size_t}; + +use {raw, Error}; + +#[doc(hidden)] +pub trait IsNull { + fn is_ptr_null(&self) -> bool; +} +impl IsNull for *const T { + fn is_ptr_null(&self) -> bool { + self.is_null() + } +} +impl IsNull for *mut T { + fn is_ptr_null(&self) -> bool { + self.is_null() + } +} + +#[doc(hidden)] +pub trait Binding: Sized { + type Raw; + + unsafe fn from_raw(raw: Self::Raw) -> Self; + fn raw(&self) -> Self::Raw; + + unsafe fn from_raw_opt(raw: T) -> Option + where T: Copy + IsNull, Self: Binding + { + if raw.is_ptr_null() { + None + } else { + Some(Binding::from_raw(raw)) + } + } +} + +pub fn iter2cstrs(iter: I) -> Result<(Vec, Vec<*const c_char>, + raw::git_strarray), Error> + where T: IntoCString, I: IntoIterator +{ + let cstrs: Vec<_> = try!(iter.into_iter().map(|i| i.into_c_string()).collect()); + let ptrs = cstrs.iter().map(|i| i.as_ptr()).collect::>(); + let raw = raw::git_strarray { + strings: ptrs.as_ptr() as *mut _, + count: ptrs.len() as size_t, + }; + 
Ok((cstrs, ptrs, raw)) +} + +#[cfg(unix)] +pub fn bytes2path(b: &[u8]) -> &Path { + use std::os::unix::prelude::*; + Path::new(OsStr::from_bytes(b)) +} +#[cfg(windows)] +pub fn bytes2path(b: &[u8]) -> &Path { + use std::str; + Path::new(str::from_utf8(b).unwrap()) +} + +/// A class of types that can be converted to C strings. +/// +/// These types are represented internally as byte slices and it is quite rare +/// for them to contain an interior 0 byte. +pub trait IntoCString { + /// Consume this container, converting it into a CString + fn into_c_string(self) -> Result; +} + +impl<'a, T: IntoCString + Clone> IntoCString for &'a T { + fn into_c_string(self) -> Result { + self.clone().into_c_string() + } +} + +impl<'a> IntoCString for &'a str { + fn into_c_string(self) -> Result { + Ok(try!(CString::new(self))) + } +} + +impl IntoCString for String { + fn into_c_string(self) -> Result { + Ok(try!(CString::new(self.into_bytes()))) + } +} + +impl IntoCString for CString { + fn into_c_string(self) -> Result { Ok(self) } +} + +impl<'a> IntoCString for &'a Path { + fn into_c_string(self) -> Result { + let s: &OsStr = self.as_ref(); + s.into_c_string() + } +} + +impl IntoCString for PathBuf { + fn into_c_string(self) -> Result { + let s: OsString = self.into(); + s.into_c_string() + } +} + +impl<'a> IntoCString for &'a OsStr { + fn into_c_string(self) -> Result { + self.to_os_string().into_c_string() + } +} + +impl IntoCString for OsString { + #[cfg(unix)] + fn into_c_string(self) -> Result { + use std::os::unix::prelude::*; + let s: &OsStr = self.as_ref(); + Ok(try!(CString::new(s.as_bytes()))) + } + #[cfg(windows)] + fn into_c_string(self) -> Result { + match self.to_str() { + Some(s) => s.into_c_string(), + None => Err(Error::from_str("only valid unicode paths are accepted \ + on windows")), + } + } +} + +impl<'a> IntoCString for &'a [u8] { + fn into_c_string(self) -> Result { + Ok(try!(CString::new(self))) + } +} + +impl IntoCString for Vec { + fn into_c_string(self) 
-> Result { + Ok(try!(CString::new(self))) + } +} + +pub fn into_opt_c_string(opt_s: Option) -> Result, Error> + where S: IntoCString +{ + match opt_s { + None => Ok(None), + Some(s) => Ok(Some(try!(s.into_c_string()))), + } +} diff -Nru cargo-0.17.0/vendor/git2-0.6.4/.travis.yml cargo-0.19.0/vendor/git2-0.6.4/.travis.yml --- cargo-0.17.0/vendor/git2-0.6.4/.travis.yml 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/git2-0.6.4/.travis.yml 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,44 @@ +language: rust +rust: + - stable + - beta + - nightly +sudo: false +before_script: + - pip install 'travis-cargo<0.2' --user && export PATH=$HOME/.local/bin:$PATH +script: + - cargo test --no-default-features + - cargo test + - cargo run --manifest-path systest/Cargo.toml --release + - if [ "$TRAVIS_RUST_VERSION" = "nightly" ]; then + cargo test --features unstable; + cargo test --manifest-path git2-curl/Cargo.toml; + fi + - cargo doc --no-deps + - cargo doc --manifest-path=git2-curl/Cargo.toml --no-deps + - cargo doc --manifest-path=libgit2-sys/Cargo.toml --no-deps +after_success: + - travis-cargo --only nightly doc-upload + - travis-cargo coveralls --no-sudo +notifications: + email: + on_success: never +matrix: + include: + - os: osx + rust: stable + before_install: + - export OPENSSL_INCLUDE_DIR=`brew --prefix openssl`/include + - export OPENSSL_LIB_DIR=`brew --prefix openssl`/lib +addons: + apt: + sources: + - kalakris-cmake + packages: + - cmake + - libcurl4-openssl-dev + - libelf-dev + - libdw-dev +env: + global: + secure: "SVk5cv4VnBQAoaBXt9pIHk+FQ7Z58zT5EaPo7Ac81LltKztwHovhN/R1otKzgrAJqFsZ/nKR4cGyQGbYtfVJcsqweQVM75LI6Oh6lYyEdfX211ZI3SWQ50JO93CmwLtanC5UpECdXvJLCgXrHGJXuL1oi7hySGy47/yQlKH6eaM=" diff -Nru cargo-0.17.0/vendor/itoa-0.3.1/benches/bench.rs cargo-0.19.0/vendor/itoa-0.3.1/benches/bench.rs --- cargo-0.17.0/vendor/itoa-0.3.1/benches/bench.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/itoa-0.3.1/benches/bench.rs 2017-08-16 
09:07:18.000000000 +0000 @@ -0,0 +1,52 @@ +#![feature(test)] +#![allow(non_snake_case)] + +extern crate itoa; +extern crate test; + +macro_rules! benches { + ($($name:ident($value:expr),)*) => { + mod bench_itoa { + use test::{Bencher, black_box}; + $( + #[bench] + fn $name(b: &mut Bencher) { + use itoa; + + let mut buf = Vec::with_capacity(20); + + b.iter(|| { + buf.clear(); + itoa::write(&mut buf, black_box($value)).unwrap() + }); + } + )* + } + + mod bench_fmt { + use test::{Bencher, black_box}; + $( + #[bench] + fn $name(b: &mut Bencher) { + use std::io::Write; + + let mut buf = Vec::with_capacity(20); + + b.iter(|| { + buf.clear(); + write!(&mut buf, "{}", black_box($value)).unwrap() + }); + } + )* + } + } +} + +benches!( + bench_0u64(0u64), + bench_HALFu64(::max_value() as u64), + bench_MAXu64(::max_value()), + + bench_0i16(0i16), + bench_MINi16(::min_value()), +); diff -Nru cargo-0.17.0/vendor/itoa-0.3.1/.cargo-checksum.json cargo-0.19.0/vendor/itoa-0.3.1/.cargo-checksum.json --- cargo-0.17.0/vendor/itoa-0.3.1/.cargo-checksum.json 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/itoa-0.3.1/.cargo-checksum.json 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1 @@ 
+{"files":{".cargo-ok":"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",".gitignore":"f9b1ca6ae27d1c18215265024629a8960c31379f206d9ed20f64e0b2dcf79805",".travis.yml":"a2b867b2e28af9bde20a669a6ff0f366ecc5150b89314cd7ec97ed95bb427547","Cargo.toml":"82b9e862ca8c12656987883e7339d992b770b2a8b23a9cd9ceb5ae0083252687","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"e18259ab3aa7f39a194795bdad8039b3c5fd544f6dd922526c9326c44842b76d","README.md":"f2b713cdc7ade373c4a733c09693cecd288201ec76bde725de65b4ff74530284","benches/bench.rs":"3e7075b70a899ab1e926403856afeb04b34a254b234bbca834f6136a703008a3","performance.png":"a6e70647a44084e65cedaaff3633b0624b37e0f0a84457362c1e078fb56c877d","src/lib.rs":"16169ef9fc6c6a6521daff8fefdfc1b54f4ce145763b9733308d6631dad4d14e","tests/test.rs":"9c7629f758e2833757c15617cd8c1ec2a2fb8437865d05b5d20abb07279d35ea"},"package":"eb2f404fbc66fd9aac13e998248505e7ecb2ad8e44ab6388684c5fb11c6c251c"} \ No newline at end of file diff -Nru cargo-0.17.0/vendor/itoa-0.3.1/Cargo.toml cargo-0.19.0/vendor/itoa-0.3.1/Cargo.toml --- cargo-0.17.0/vendor/itoa-0.3.1/Cargo.toml 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/itoa-0.3.1/Cargo.toml 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,9 @@ +[package] +name = "itoa" +version = "0.3.1" +authors = ["David Tolnay "] +license = "MIT/Apache-2.0" +description = "Fast functions for printing integer primitives to an io::Write" +repository = "https://github.com/dtolnay/itoa" +documentation = "https://github.com/dtolnay/itoa" +categories = ["value-formatting"] diff -Nru cargo-0.17.0/vendor/itoa-0.3.1/.gitignore cargo-0.19.0/vendor/itoa-0.3.1/.gitignore --- cargo-0.17.0/vendor/itoa-0.3.1/.gitignore 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/itoa-0.3.1/.gitignore 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,2 @@ +target +Cargo.lock diff -Nru cargo-0.17.0/vendor/itoa-0.3.1/LICENSE-APACHE 
cargo-0.19.0/vendor/itoa-0.3.1/LICENSE-APACHE --- cargo-0.17.0/vendor/itoa-0.3.1/LICENSE-APACHE 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/itoa-0.3.1/LICENSE-APACHE 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + +3. 
Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + +4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, 
in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + +Copyright [yyyy] [name of copyright owner] + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
diff -Nru cargo-0.17.0/vendor/itoa-0.3.1/LICENSE-MIT cargo-0.19.0/vendor/itoa-0.3.1/LICENSE-MIT --- cargo-0.17.0/vendor/itoa-0.3.1/LICENSE-MIT 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/itoa-0.3.1/LICENSE-MIT 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,25 @@ +Copyright (c) 2016 Itoa Developers + +Permission is hereby granted, free of charge, to any +person obtaining a copy of this software and associated +documentation files (the "Software"), to deal in the +Software without restriction, including without +limitation the rights to use, copy, modify, merge, +publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software +is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice +shall be included in all copies or substantial portions +of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF +ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED +TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A +PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR +IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. 
Binary files /tmp/tmplxiqkI/U7kx02RAH_/cargo-0.17.0/vendor/itoa-0.3.1/performance.png and /tmp/tmplxiqkI/L4nBRNMLnv/cargo-0.19.0/vendor/itoa-0.3.1/performance.png differ diff -Nru cargo-0.17.0/vendor/itoa-0.3.1/README.md cargo-0.19.0/vendor/itoa-0.3.1/README.md --- cargo-0.17.0/vendor/itoa-0.3.1/README.md 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/itoa-0.3.1/README.md 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,70 @@ +itoa +==== + +[![Build Status](https://api.travis-ci.org/dtolnay/itoa.svg?branch=master)](https://travis-ci.org/dtolnay/itoa) +[![Latest Version](https://img.shields.io/crates/v/itoa.svg)](https://crates.io/crates/itoa) + +This crate provides fast functions for printing integer primitives to an +[`io::Write`](https://doc.rust-lang.org/std/io/trait.Write.html). The +implementation comes straight from +[libcore](https://github.com/rust-lang/rust/blob/b8214dc6c6fc20d0a660fb5700dca9ebf51ebe89/src/libcore/fmt/num.rs#L201-L254) +but avoids the performance penalty of going through +[`fmt::Formatter`](https://doc.rust-lang.org/std/fmt/struct.Formatter.html). + +See also [`dtoa`](https://github.com/dtolnay/dtoa) for printing floating point +primitives. + +## Performance + +![performance](https://raw.githubusercontent.com/dtolnay/itoa/master/performance.png) + +## Functions + +```rust +extern crate itoa; + +// write to a vector or other io::Write +let mut buf = Vec::new(); +itoa::write(&mut buf, 128u64)?; +println!("{:?}", buf); + +// write to a stack buffer +let mut bytes = [b'\0'; 20]; +let n = itoa::write(&mut bytes[..], 128u64)?; +println!("{:?}", &bytes[..n]); +``` + +The function signature is: + +```rust +fn write(writer: W, value: V) -> io::Result +``` + +where `itoa::Integer` is implemented for `i8`, `u8`, `i16`, `u16`, `i32`, `u32`, +`i64`, `u64`, `isize` and `usize`. The return value gives the number of bytes +written. + +## Dependency + +Itoa is available on [crates.io](https://crates.io/crates/itoa). 
Use the +following in `Cargo.toml`: + +```toml +[dependencies] +itoa = "0.3" +``` + +## License + +Licensed under either of + + * Apache License, Version 2.0 ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0) + * MIT license ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT) + +at your option. + +### Contribution + +Unless you explicitly state otherwise, any contribution intentionally submitted +for inclusion in itoa by you, as defined in the Apache-2.0 license, shall be +dual licensed as above, without any additional terms or conditions. diff -Nru cargo-0.17.0/vendor/itoa-0.3.1/src/lib.rs cargo-0.19.0/vendor/itoa-0.3.1/src/lib.rs --- cargo-0.17.0/vendor/itoa-0.3.1/src/lib.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/itoa-0.3.1/src/lib.rs 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,101 @@ +// Copyright 2016 Itoa Developers +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use std::{io, mem, ptr, slice}; + +#[inline] +pub fn write(wr: W, value: V) -> io::Result { + value.write(wr) +} + +pub trait Integer { + fn write(self, W) -> io::Result; +} + +const DEC_DIGITS_LUT: &'static[u8] = + b"0001020304050607080910111213141516171819\ + 2021222324252627282930313233343536373839\ + 4041424344454647484950515253545556575859\ + 6061626364656667686970717273747576777879\ + 8081828384858687888990919293949596979899"; + +// Adaptation of the original implementation at +// https://github.com/rust-lang/rust/blob/b8214dc6c6fc20d0a660fb5700dca9ebf51ebe89/src/libcore/fmt/num.rs#L188-L266 +macro_rules! 
impl_Integer { + ($($t:ident),* as $conv_fn:ident) => ($( + impl Integer for $t { + #[allow(unused_comparisons)] + fn write(self, mut wr: W) -> io::Result { + let is_nonnegative = self >= 0; + let mut n = if is_nonnegative { + self as $conv_fn + } else { + try!(wr.write_all(b"-")); + // convert the negative num to positive by summing 1 to it's 2 complement + (!(self as $conv_fn)).wrapping_add(1) + }; + let mut buf: [u8; 20] = unsafe { mem::uninitialized() }; + let mut curr = buf.len() as isize; + let buf_ptr = buf.as_mut_ptr(); + let lut_ptr = DEC_DIGITS_LUT.as_ptr(); + + unsafe { + // eagerly decode 4 characters at a time + if <$t>::max_value() as u64 >= 10000 { + while n >= 10000 { + let rem = (n % 10000) as isize; + n /= 10000; + + let d1 = (rem / 100) << 1; + let d2 = (rem % 100) << 1; + curr -= 4; + ptr::copy_nonoverlapping(lut_ptr.offset(d1), buf_ptr.offset(curr), 2); + ptr::copy_nonoverlapping(lut_ptr.offset(d2), buf_ptr.offset(curr + 2), 2); + } + } + + // if we reach here numbers are <= 9999, so at most 4 chars long + let mut n = n as isize; // possibly reduce 64bit math + + // decode 2 more chars, if > 2 chars + if n >= 100 { + let d1 = (n % 100) << 1; + n /= 100; + curr -= 2; + ptr::copy_nonoverlapping(lut_ptr.offset(d1), buf_ptr.offset(curr), 2); + } + + // decode last 1 or 2 chars + if n < 10 { + curr -= 1; + *buf_ptr.offset(curr) = (n as u8) + 48; + } else { + let d1 = n << 1; + curr -= 2; + ptr::copy_nonoverlapping(lut_ptr.offset(d1), buf_ptr.offset(curr), 2); + } + } + + let mut len = buf.len() - curr as usize; + try!(wr.write_all(unsafe { slice::from_raw_parts(buf_ptr.offset(curr), len) })); + if !is_nonnegative { + len += 1; + } + Ok(len) + } + })*); +} + +impl_Integer!(i8, u8, i16, u16, i32, u32 as u32); +impl_Integer!(i64, u64 as u64); +#[cfg(target_pointer_width = "16")] +impl_Integer!(isize, usize as u16); +#[cfg(target_pointer_width = "32")] +impl_Integer!(isize, usize as u32); +#[cfg(target_pointer_width = "64")] +impl_Integer!(isize, usize 
as u64); diff -Nru cargo-0.17.0/vendor/itoa-0.3.1/tests/test.rs cargo-0.19.0/vendor/itoa-0.3.1/tests/test.rs --- cargo-0.17.0/vendor/itoa-0.3.1/tests/test.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/itoa-0.3.1/tests/test.rs 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,25 @@ +#![allow(non_snake_case)] + +extern crate itoa; + +macro_rules! test { + ($($name:ident($value:expr, $expected:expr),)*) => { + $( + #[test] + fn $name() { + let mut buf = [b'\0'; 20]; + let len = itoa::write(&mut buf[..], $value).unwrap(); + assert_eq!(&buf[0..len], $expected.as_bytes()); + } + )* + } +} + +test!( + test_0u64(0u64, "0"), + test_HALFu64(::max_value() as u64, "4294967295"), + test_MAXu64(::max_value(), "18446744073709551615"), + + test_0i16(0i16, "0"), + test_MINi16(::min_value(), "-32768"), +); diff -Nru cargo-0.17.0/vendor/itoa-0.3.1/.travis.yml cargo-0.19.0/vendor/itoa-0.3.1/.travis.yml --- cargo-0.17.0/vendor/itoa-0.3.1/.travis.yml 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/itoa-0.3.1/.travis.yml 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,6 @@ +sudo: false + +language: rust + +rust: + - nightly diff -Nru cargo-0.17.0/vendor/lazy_static-0.2.2/.cargo-checksum.json cargo-0.19.0/vendor/lazy_static-0.2.2/.cargo-checksum.json --- cargo-0.17.0/vendor/lazy_static-0.2.2/.cargo-checksum.json 2017-03-24 16:59:54.000000000 +0000 +++ cargo-0.19.0/vendor/lazy_static-0.2.2/.cargo-checksum.json 1970-01-01 00:00:00.000000000 +0000 @@ -1 +0,0 @@ 
-{"files":{".cargo-ok":"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",".gitignore":"ee9afa8d5e10d088632b66c622d4451a826a4066e0e84052f40b9b3e82c6bec7",".travis.yml":"b6983ce563e5ec756033bfef06e2f2b5f0ac2c1e8eb15803051c1929a328ce30","Cargo.toml":"01ccfeb6f1bfb8320bec14aa4681e4573b35ff0b7514dece1934924ff89bbeac","LICENSE":"79d26c3d855d83d92837c49a868339ec7c2ef7d2a19d7a779ebb4c30d160d90a","README.md":"932d081be16cf7b787400973604712c80b2bc119764c7b742c53cf093cd056fa","src/core_lazy.rs":"fb3e56b2480d2a970e3ac4e3104119b9b22c58b05b6f63dc5b6fd39082faa635","src/lazy.rs":"fcc9cf369e72d52da24307a98317e7001b339948e6063baa449e10294e80e94d","src/lib.rs":"92c3974a0a1c92e75cca75a287ad9241b436b807929854385435d1f4b80931f3","src/nightly_lazy.rs":"31619f7467766127ca049df5d14f82384f6f756db52fc388421ef8a39ba5465f","tests/no_std.rs":"2a5236bd3892a253855b4dc192f63138239165fa23b9c3421a9faa5482c780aa","tests/test.rs":"3133070a63f278c2204dbafa67734453ed003971947521c412b9d366daf62b73"},"package":"6abe0ee2e758cd6bc8a2cd56726359007748fbf4128da998b65d0b70f881e19b"} \ No newline at end of file diff -Nru cargo-0.17.0/vendor/lazy_static-0.2.2/Cargo.toml cargo-0.19.0/vendor/lazy_static-0.2.2/Cargo.toml --- cargo-0.17.0/vendor/lazy_static-0.2.2/Cargo.toml 2017-03-24 16:59:54.000000000 +0000 +++ cargo-0.19.0/vendor/lazy_static-0.2.2/Cargo.toml 1970-01-01 00:00:00.000000000 +0000 @@ -1,20 +0,0 @@ -[package] -name = "lazy_static" -version = "0.2.2" -authors = ["Marvin Löbel "] -license = "MIT" - -description = "A macro for declaring lazily evaluated statics in Rust." 
-readme = "README.md" -documentation = "http://rust-lang-nursery.github.io/lazy-static.rs/lazy_static/index.html" - -repository = "https://github.com/rust-lang-nursery/lazy-static.rs" -keywords = ["macro", "lazy", "static"] - -[dependencies.spin] -version = "0.4" -optional = true - -[features] -nightly = [] -spin_no_std = ["nightly", "spin"] diff -Nru cargo-0.17.0/vendor/lazy_static-0.2.2/.gitignore cargo-0.19.0/vendor/lazy_static-0.2.2/.gitignore --- cargo-0.17.0/vendor/lazy_static-0.2.2/.gitignore 2017-03-24 16:59:54.000000000 +0000 +++ cargo-0.19.0/vendor/lazy_static-0.2.2/.gitignore 1970-01-01 00:00:00.000000000 +0000 @@ -1,4 +0,0 @@ -target -doc -Cargo.lock -.cargo diff -Nru cargo-0.17.0/vendor/lazy_static-0.2.2/LICENSE cargo-0.19.0/vendor/lazy_static-0.2.2/LICENSE --- cargo-0.17.0/vendor/lazy_static-0.2.2/LICENSE 2017-03-24 16:59:54.000000000 +0000 +++ cargo-0.19.0/vendor/lazy_static-0.2.2/LICENSE 1970-01-01 00:00:00.000000000 +0000 @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2014 Marvin Löbel - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. \ No newline at end of file diff -Nru cargo-0.17.0/vendor/lazy_static-0.2.2/README.md cargo-0.19.0/vendor/lazy_static-0.2.2/README.md --- cargo-0.17.0/vendor/lazy_static-0.2.2/README.md 2017-03-24 16:59:54.000000000 +0000 +++ cargo-0.19.0/vendor/lazy_static-0.2.2/README.md 1970-01-01 00:00:00.000000000 +0000 @@ -1,81 +0,0 @@ -lazy-static.rs -============== - -[![Travis-CI Status](https://travis-ci.org/rust-lang-nursery/lazy-static.rs.svg?branch=master)](https://travis-ci.org/rust-lang-nursery/lazy-static.rs) - -A macro for declaring lazily evaluated statics in Rust. - -Using this macro, it is possible to have `static`s that require code to be -executed at runtime in order to be initialized. -This includes anything requiring heap allocations, like vectors or hash maps, -as well as anything that requires function calls to be computed. - -# Syntax - -```rust -lazy_static! { - [pub] static ref NAME_1: TYPE_1 = EXPR_1; - [pub] static ref NAME_2: TYPE_2 = EXPR_2; - ... - [pub] static ref NAME_N: TYPE_N = EXPR_N; -} -``` - -# Semantic - -For a given `static ref NAME: TYPE = EXPR;`, the macro generates a -unique type that implements `Deref` and stores it in a static with name `NAME`. - -On first deref, `EXPR` gets evaluated and stored internally, such that all further derefs -can return a reference to the same object. - -Like regular `static mut`s, this macro only works for types that fulfill the `Sync` -trait. - -# Getting Started - -[lazy-static.rs is available on crates.io](https://crates.io/crates/lazy_static). 
-Add the following dependency to your Cargo manifest to get the latest version of the 0.1 branch: - -```toml -[dependencies] -lazy_static = "0.1.*" -``` - -To always get the latest version, add this git repository to your -Cargo manifest: - -```toml -[dependencies.lazy_static] -git = "https://github.com/rust-lang-nursery/lazy-static.rs" -``` -# Example - -Using the macro: - -```rust -#[macro_use] -extern crate lazy_static; - -use std::collections::HashMap; - -lazy_static! { - static ref HASHMAP: HashMap = { - let mut m = HashMap::new(); - m.insert(0, "foo"); - m.insert(1, "bar"); - m.insert(2, "baz"); - m - }; - static ref COUNT: usize = HASHMAP.len(); - static ref NUMBER: u32 = times_two(21); -} - -fn times_two(n: u32) -> u32 { n * 2 } - -fn main() { - println!("The map has {} entries.", *COUNT); - println!("The entry for `0` is \"{}\".", HASHMAP.get(&0).unwrap()); - println!("An expensive calculation on a static results in: {}.", *NUMBER); -} -``` diff -Nru cargo-0.17.0/vendor/lazy_static-0.2.2/src/core_lazy.rs cargo-0.19.0/vendor/lazy_static-0.2.2/src/core_lazy.rs --- cargo-0.17.0/vendor/lazy_static-0.2.2/src/core_lazy.rs 2017-03-24 16:59:54.000000000 +0000 +++ cargo-0.19.0/vendor/lazy_static-0.2.2/src/core_lazy.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,27 +0,0 @@ -extern crate spin; - -use self::spin::Once; - -pub struct Lazy(Once); - -impl Lazy { - #[inline(always)] - pub const fn new() -> Self { - Lazy(Once::new()) - } - - #[inline(always)] - pub fn get(&'static self, builder: F) -> &T - where F: FnOnce() -> T - { - self.0.call_once(builder) - } -} - -#[macro_export] -#[allow_internal_unstable] -macro_rules! 
__lazy_static_create { - ($NAME:ident, $T:ty) => { - static $NAME: $crate::lazy::Lazy<$T> = $crate::lazy::Lazy::new(); - } -} diff -Nru cargo-0.17.0/vendor/lazy_static-0.2.2/src/lazy.rs cargo-0.19.0/vendor/lazy_static-0.2.2/src/lazy.rs --- cargo-0.17.0/vendor/lazy_static-0.2.2/src/lazy.rs 2017-03-24 16:59:54.000000000 +0000 +++ cargo-0.19.0/vendor/lazy_static-0.2.2/src/lazy.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,32 +0,0 @@ -extern crate std; - -use self::std::prelude::v1::*; -use self::std::sync::Once; - -pub struct Lazy(pub *const T, pub Once); - -impl Lazy { - #[inline(always)] - pub fn get(&'static mut self, f: F) -> &T - where F: FnOnce() -> T - { - unsafe { - let r = &mut self.0; - self.1.call_once(|| { - *r = Box::into_raw(Box::new(f())); - }); - - &*self.0 - } - } -} - -unsafe impl Sync for Lazy {} - -#[macro_export] -macro_rules! __lazy_static_create { - ($NAME:ident, $T:ty) => { - use std::sync::ONCE_INIT; - static mut $NAME: $crate::lazy::Lazy<$T> = $crate::lazy::Lazy(0 as *const $T, ONCE_INIT); - } -} diff -Nru cargo-0.17.0/vendor/lazy_static-0.2.2/src/lib.rs cargo-0.19.0/vendor/lazy_static-0.2.2/src/lib.rs --- cargo-0.17.0/vendor/lazy_static-0.2.2/src/lib.rs 2017-03-24 16:59:54.000000000 +0000 +++ cargo-0.19.0/vendor/lazy_static-0.2.2/src/lib.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,137 +0,0 @@ -/*! -A macro for declaring lazily evaluated statics. - -Using this macro, it is possible to have `static`s that require code to be -executed at runtime in order to be initialized. -This includes anything requiring heap allocations, like vectors or hash maps, -as well as anything that requires function calls to be computed. - -# Syntax - -```ignore -lazy_static! { - [pub] static ref NAME_1: TYPE_1 = EXPR_1; - [pub] static ref NAME_2: TYPE_2 = EXPR_2; - ... - [pub] static ref NAME_N: TYPE_N = EXPR_N; -} -``` - -Metadata (such as doc comments) is allowed on each ref. 
- -# Semantic - -For a given `static ref NAME: TYPE = EXPR;`, the macro generates a unique type that -implements `Deref` and stores it in a static with name `NAME`. (Metadata ends up -attaching to this type.) - -On first deref, `EXPR` gets evaluated and stored internally, such that all further derefs -can return a reference to the same object. - -Like regular `static mut`s, this macro only works for types that fulfill the `Sync` -trait. - -# Example - -Using the macro: - -```rust -#[macro_use] -extern crate lazy_static; - -use std::collections::HashMap; - -lazy_static! { - static ref HASHMAP: HashMap = { - let mut m = HashMap::new(); - m.insert(0, "foo"); - m.insert(1, "bar"); - m.insert(2, "baz"); - m - }; - static ref COUNT: usize = HASHMAP.len(); - static ref NUMBER: u32 = times_two(21); -} - -fn times_two(n: u32) -> u32 { n * 2 } - -fn main() { - println!("The map has {} entries.", *COUNT); - println!("The entry for `0` is \"{}\".", HASHMAP.get(&0).unwrap()); - println!("A expensive calculation on a static results in: {}.", *NUMBER); -} -``` - -# Implementation details - -The `Deref` implementation uses a hidden static variable that is guarded by a atomic check on each access. On stable Rust, the macro may need to allocate each static on the heap. - -*/ - -#![cfg_attr(feature="nightly", feature(const_fn, allow_internal_unstable, core_intrinsics))] - -#![no_std] - -#[cfg(not(feature="nightly"))] -pub mod lazy; - -#[cfg(all(feature="nightly", not(feature="spin_no_std")))] -#[path="nightly_lazy.rs"] -pub mod lazy; - -#[cfg(all(feature="nightly", feature="spin_no_std"))] -#[path="core_lazy.rs"] -pub mod lazy; - -pub use core::ops::Deref as __Deref; - -#[macro_export] -#[cfg_attr(feature="nightly", allow_internal_unstable)] -macro_rules! 
lazy_static { - ($(#[$attr:meta])* static ref $N:ident : $T:ty = $e:expr; $($t:tt)*) => { - lazy_static!(@PRIV, $(#[$attr])* static ref $N : $T = $e; $($t)*); - }; - ($(#[$attr:meta])* pub static ref $N:ident : $T:ty = $e:expr; $($t:tt)*) => { - lazy_static!(@PUB, $(#[$attr])* static ref $N : $T = $e; $($t)*); - }; - (@$VIS:ident, $(#[$attr:meta])* static ref $N:ident : $T:ty = $e:expr; $($t:tt)*) => { - lazy_static!(@MAKE TY, $VIS, $(#[$attr])*, $N); - impl $crate::__Deref for $N { - type Target = $T; - #[allow(unsafe_code)] - fn deref<'a>(&'a self) -> &'a $T { - unsafe { - #[inline(always)] - fn __static_ref_initialize() -> $T { $e } - - #[inline(always)] - unsafe fn __stability() -> &'static $T { - __lazy_static_create!(LAZY, $T); - LAZY.get(__static_ref_initialize) - } - __stability() - } - } - } - lazy_static!($($t)*); - }; - (@MAKE TY, PUB, $(#[$attr:meta])*, $N:ident) => { - #[allow(missing_copy_implementations)] - #[allow(non_camel_case_types)] - #[allow(dead_code)] - $(#[$attr])* - pub struct $N {__private_field: ()} - #[doc(hidden)] - pub static $N: $N = $N {__private_field: ()}; - }; - (@MAKE TY, PRIV, $(#[$attr:meta])*, $N:ident) => { - #[allow(missing_copy_implementations)] - #[allow(non_camel_case_types)] - #[allow(dead_code)] - $(#[$attr])* - struct $N {__private_field: ()} - #[doc(hidden)] - static $N: $N = $N {__private_field: ()}; - }; - () => () -} diff -Nru cargo-0.17.0/vendor/lazy_static-0.2.2/src/nightly_lazy.rs cargo-0.19.0/vendor/lazy_static-0.2.2/src/nightly_lazy.rs --- cargo-0.17.0/vendor/lazy_static-0.2.2/src/nightly_lazy.rs 2017-03-24 16:59:54.000000000 +0000 +++ cargo-0.19.0/vendor/lazy_static-0.2.2/src/nightly_lazy.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,40 +0,0 @@ -extern crate std; - -use self::std::prelude::v1::*; -use self::std::cell::UnsafeCell; -use self::std::sync::{Once, ONCE_INIT}; - -pub struct Lazy(UnsafeCell>, Once); - -impl Lazy { - #[inline(always)] - pub const fn new() -> Self { - Lazy(UnsafeCell::new(None), 
ONCE_INIT) - } - - #[inline(always)] - pub fn get(&'static self, f: F) -> &T - where F: FnOnce() -> T - { - unsafe { - self.1.call_once(|| { - *self.0.get() = Some(f()); - }); - - match *self.0.get() { - Some(ref x) => x, - None => std::intrinsics::unreachable(), - } - } - } -} - -unsafe impl Sync for Lazy {} - -#[macro_export] -#[allow_internal_unstable] -macro_rules! __lazy_static_create { - ($NAME:ident, $T:ty) => { - static $NAME: $crate::lazy::Lazy<$T> = $crate::lazy::Lazy::new(); - } -} diff -Nru cargo-0.17.0/vendor/lazy_static-0.2.2/tests/no_std.rs cargo-0.19.0/vendor/lazy_static-0.2.2/tests/no_std.rs --- cargo-0.17.0/vendor/lazy_static-0.2.2/tests/no_std.rs 2017-03-24 16:59:54.000000000 +0000 +++ cargo-0.19.0/vendor/lazy_static-0.2.2/tests/no_std.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,21 +0,0 @@ -#![cfg(feature="spin_no_std")] -#![feature(const_fn)] - -#![no_std] - -#[macro_use] -extern crate lazy_static; - -lazy_static! { - /// Documentation! - pub static ref NUMBER: u32 = times_two(3); -} - -fn times_two(n: u32) -> u32 { - n * 2 -} - -#[test] -fn test_basic() { - assert_eq!(*NUMBER, 6); -} diff -Nru cargo-0.17.0/vendor/lazy_static-0.2.2/tests/test.rs cargo-0.19.0/vendor/lazy_static-0.2.2/tests/test.rs --- cargo-0.17.0/vendor/lazy_static-0.2.2/tests/test.rs 2017-03-24 16:59:54.000000000 +0000 +++ cargo-0.19.0/vendor/lazy_static-0.2.2/tests/test.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,129 +0,0 @@ -#![cfg_attr(feature="nightly", feature(const_fn))] - -#[macro_use] -extern crate lazy_static; -use std::collections::HashMap; - -lazy_static! { - /// Documentation! - pub static ref NUMBER: u32 = times_two(3); - - static ref ARRAY_BOXES: [Box; 3] = [Box::new(1), Box::new(2), Box::new(3)]; - - /// More documentation! 
- #[allow(unused_variables)] - #[derive(Copy, Clone, Debug)] - pub static ref STRING: String = "hello".to_string(); - - static ref HASHMAP: HashMap = { - let mut m = HashMap::new(); - m.insert(0, "abc"); - m.insert(1, "def"); - m.insert(2, "ghi"); - m - }; - - // This should not compile if the unsafe is removed. - static ref UNSAFE: u32 = unsafe { - std::mem::transmute::(-1) - }; - - // This *should* triggger warn(dead_code) by design. - static ref UNUSED: () = (); - -} - -lazy_static! { - static ref S1: &'static str = "a"; - static ref S2: &'static str = "b"; -} -lazy_static! { - static ref S3: String = [*S1, *S2].join(""); -} - -#[test] -fn s3() { - assert_eq!(&*S3, "ab"); -} - -fn times_two(n: u32) -> u32 { - n * 2 -} - -#[test] -fn test_basic() { - assert_eq!(&**STRING, "hello"); - assert_eq!(*NUMBER, 6); - assert!(HASHMAP.get(&1).is_some()); - assert!(HASHMAP.get(&3).is_none()); - assert_eq!(&*ARRAY_BOXES, &[Box::new(1), Box::new(2), Box::new(3)]); - assert_eq!(*UNSAFE, std::u32::MAX); -} - -#[test] -fn test_repeat() { - assert_eq!(*NUMBER, 6); - assert_eq!(*NUMBER, 6); - assert_eq!(*NUMBER, 6); -} - -#[test] -fn test_meta() { - // this would not compile if STRING were not marked #[derive(Copy, Clone)] - let copy_of_string = STRING; - // just to make sure it was copied - assert!(&STRING as *const _ != ©_of_string as *const _); - - // this would not compile if STRING were not marked #[derive(Debug)] - assert_eq!(format!("{:?}", STRING), "STRING { __private_field: () }".to_string()); -} - -mod visibility { - lazy_static! { - pub static ref FOO: Box = Box::new(0); - static ref BAR: Box = Box::new(98); - } - - #[test] - fn sub_test() { - assert_eq!(**FOO, 0); - assert_eq!(**BAR, 98); - } -} - -#[test] -fn test_visibility() { - assert_eq!(*visibility::FOO, Box::new(0)); -} - -// This should not cause a warning about a missing Copy implementation -lazy_static! 
{ - pub static ref VAR: i32 = { 0 }; -} - -#[derive(Copy, Clone, Debug, PartialEq)] -struct X; -struct Once(X); -const ONCE_INIT: Once = Once(X); -static DATA: X = X; -static ONCE: X = X; -fn require_sync() -> X { X } -fn transmute() -> X { X } -fn __static_ref_initialize() -> X { X } -fn test(_: Vec) -> X { X } - -// All these names should not be shadowed -lazy_static! { - static ref ITEM_NAME_TEST: X = { - test(vec![X, Once(X).0, ONCE_INIT.0, DATA, ONCE, - require_sync(), transmute(), - // Except this, which will sadly be shadowed by internals: - // __static_ref_initialize() - ]) - }; -} - -#[test] -fn item_name_shadowing() { - assert_eq!(*ITEM_NAME_TEST, X); -} diff -Nru cargo-0.17.0/vendor/lazy_static-0.2.2/.travis.yml cargo-0.19.0/vendor/lazy_static-0.2.2/.travis.yml --- cargo-0.17.0/vendor/lazy_static-0.2.2/.travis.yml 2017-03-24 16:59:54.000000000 +0000 +++ cargo-0.19.0/vendor/lazy_static-0.2.2/.travis.yml 1970-01-01 00:00:00.000000000 +0000 @@ -1,24 +0,0 @@ -language: rust -rust: -- nightly -- beta -- stable -before_script: -- | - pip install 'travis-cargo<0.2' --user && - export PATH=$HOME/.local/bin:$PATH -script: -- | - travis-cargo build && - travis-cargo test && - travis-cargo bench && - travis-cargo --only nightly build -- --features spin_no_std && - travis-cargo --only nightly test -- --features spin_no_std && - travis-cargo --only nightly bench -- --features spin_no_std && - travis-cargo --only stable doc -after_success: -- travis-cargo --only stable doc-upload -env: - global: - - TRAVIS_CARGO_NIGHTLY_FEATURE=nightly - - secure: YXu24LptjeYirjWYjWGsMT2m3mB7LvQATE6TVo7VEUXv8GYoy2ORIHD83PeImxC93MmZ01QeUezRzuCW51ZcK92VnNSBttlF60SvIX18VsJrV92tsAhievFstqYQ+fB8DIuQ8noU0jPz7GpI+R9dlTRSImAqWOnVIghA+Wzz7Js= diff -Nru cargo-0.17.0/vendor/lazy_static-0.2.5/.cargo-checksum.json cargo-0.19.0/vendor/lazy_static-0.2.5/.cargo-checksum.json --- cargo-0.17.0/vendor/lazy_static-0.2.5/.cargo-checksum.json 1970-01-01 00:00:00.000000000 +0000 +++ 
cargo-0.19.0/vendor/lazy_static-0.2.5/.cargo-checksum.json 2017-08-16 09:07:16.000000000 +0000 @@ -0,0 +1 @@ +{"files":{".cargo-ok":"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",".gitignore":"ee9afa8d5e10d088632b66c622d4451a826a4066e0e84052f40b9b3e82c6bec7",".travis.yml":"b6983ce563e5ec756033bfef06e2f2b5f0ac2c1e8eb15803051c1929a328ce30","Cargo.toml":"7502a1e70105dbba367a5e16bfa679a31ed651e2fe0625994ef34de885282ff6","LICENSE":"79d26c3d855d83d92837c49a868339ec7c2ef7d2a19d7a779ebb4c30d160d90a","README.md":"9d0b4deeaeb55a9d4150ab2f7c2d3f6792445e0b690eacd15703429b4e9fbfdf","src/core_lazy.rs":"b67330f53f27e7e42b44a4293210a440d0185aeea31209b54325531712c70fdd","src/lazy.rs":"173aee41765dd16a0006cc9b929f386bac26c5c32aa2dffeae9181b732fe53a8","src/lib.rs":"74b60fa043c980fa1f9a4937a57b1443caac93299fda389eac013fac7d141eb7","src/nightly_lazy.rs":"5e547f378ae236c5993e160c66e8633e6a69888786af169bab573d5d692063aa","tests/no_std.rs":"2a5236bd3892a253855b4dc192f63138239165fa23b9c3421a9faa5482c780aa","tests/test.rs":"e6ed5863b308e396b230a3de38c6b92f16d7d45377ba17c6cac8618ca379ee58"},"package":"4732c563b9a21a406565c4747daa7b46742f082911ae4753f390dc9ec7ee1a97"} \ No newline at end of file diff -Nru cargo-0.17.0/vendor/lazy_static-0.2.5/Cargo.toml cargo-0.19.0/vendor/lazy_static-0.2.5/Cargo.toml --- cargo-0.17.0/vendor/lazy_static-0.2.5/Cargo.toml 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/lazy_static-0.2.5/Cargo.toml 2017-08-16 09:07:16.000000000 +0000 @@ -0,0 +1,20 @@ +[package] +name = "lazy_static" +version = "0.2.5" +authors = ["Marvin Löbel "] +license = "MIT" + +description = "A macro for declaring lazily evaluated statics in Rust." 
+readme = "README.md" +documentation = "http://rust-lang-nursery.github.io/lazy-static.rs/lazy_static/index.html" + +repository = "https://github.com/rust-lang-nursery/lazy-static.rs" +keywords = ["macro", "lazy", "static"] + +[dependencies.spin] +version = "0.4" +optional = true + +[features] +nightly = [] +spin_no_std = ["nightly", "spin"] diff -Nru cargo-0.17.0/vendor/lazy_static-0.2.5/.gitignore cargo-0.19.0/vendor/lazy_static-0.2.5/.gitignore --- cargo-0.17.0/vendor/lazy_static-0.2.5/.gitignore 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/lazy_static-0.2.5/.gitignore 2017-08-16 09:07:16.000000000 +0000 @@ -0,0 +1,4 @@ +target +doc +Cargo.lock +.cargo diff -Nru cargo-0.17.0/vendor/lazy_static-0.2.5/LICENSE cargo-0.19.0/vendor/lazy_static-0.2.5/LICENSE --- cargo-0.17.0/vendor/lazy_static-0.2.5/LICENSE 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/lazy_static-0.2.5/LICENSE 2017-08-16 09:07:16.000000000 +0000 @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014 Marvin Löbel + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. \ No newline at end of file diff -Nru cargo-0.17.0/vendor/lazy_static-0.2.5/README.md cargo-0.19.0/vendor/lazy_static-0.2.5/README.md --- cargo-0.17.0/vendor/lazy_static-0.2.5/README.md 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/lazy_static-0.2.5/README.md 2017-08-16 09:07:16.000000000 +0000 @@ -0,0 +1,54 @@ +lazy-static.rs +============== + +A macro for declaring lazily evaluated statics in Rust. + +Using this macro, it is possible to have `static`s that require code to be +executed at runtime in order to be initialized. +This includes anything requiring heap allocations, like vectors or hash maps, +as well as anything that requires non-const function calls to be computed. + +[![Travis-CI Status](https://travis-ci.org/rust-lang-nursery/lazy-static.rs.svg?branch=master)](https://travis-ci.org/rust-lang-nursery/lazy-static.rs) + +# Getting Started + +[lazy-static.rs is available on crates.io](https://crates.io/crates/lazy_static). +It is recommended to look there for the newest released version, as well as links to the newest builds of the docs. + +At the point of the last update of this README, the latest published version could be used like this: + +Add the following dependency to your Cargo manifest... + +```toml +[dependencies] +lazy_static = "0.2" +``` + +...and see the [docs](http://rust-lang-nursery.github.io/lazy-static.rs/lazy_static/index.html) for how to use it. + +# Example + +```rust +#[macro_use] +extern crate lazy_static; + +use std::collections::HashMap; + +lazy_static! 
{ + static ref HASHMAP: HashMap = { + let mut m = HashMap::new(); + m.insert(0, "foo"); + m.insert(1, "bar"); + m.insert(2, "baz"); + m + }; +} + +fn main() { + // First access to `HASHMAP` initializes it + println!("The entry for `0` is \"{}\".", HASHMAP.get(&0).unwrap()); + + // Any further access to `HASHMAP` just returns the computed value + println!("The entry for `1` is \"{}\".", HASHMAP.get(&1).unwrap()); +} +``` diff -Nru cargo-0.17.0/vendor/lazy_static-0.2.5/src/core_lazy.rs cargo-0.19.0/vendor/lazy_static-0.2.5/src/core_lazy.rs --- cargo-0.17.0/vendor/lazy_static-0.2.5/src/core_lazy.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/lazy_static-0.2.5/src/core_lazy.rs 2017-08-16 09:07:16.000000000 +0000 @@ -0,0 +1,28 @@ +extern crate spin; + +use self::spin::Once; + +pub struct Lazy(Once); + +impl Lazy { + #[inline(always)] + pub const fn new() -> Self { + Lazy(Once::new()) + } + + #[inline(always)] + pub fn get(&'static self, builder: F) -> &T + where F: FnOnce() -> T + { + self.0.call_once(builder) + } +} + +#[macro_export] +#[allow_internal_unstable] +#[doc(hidden)] +macro_rules! __lazy_static_create { + ($NAME:ident, $T:ty) => { + static $NAME: $crate::lazy::Lazy<$T> = $crate::lazy::Lazy::new(); + } +} diff -Nru cargo-0.17.0/vendor/lazy_static-0.2.5/src/lazy.rs cargo-0.19.0/vendor/lazy_static-0.2.5/src/lazy.rs --- cargo-0.17.0/vendor/lazy_static-0.2.5/src/lazy.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/lazy_static-0.2.5/src/lazy.rs 2017-08-16 09:07:16.000000000 +0000 @@ -0,0 +1,33 @@ +extern crate std; + +use self::std::prelude::v1::*; +use self::std::sync::Once; + +pub struct Lazy(pub *const T, pub Once); + +impl Lazy { + #[inline(always)] + pub fn get(&'static mut self, f: F) -> &T + where F: FnOnce() -> T + { + unsafe { + let r = &mut self.0; + self.1.call_once(|| { + *r = Box::into_raw(Box::new(f())); + }); + + &*self.0 + } + } +} + +unsafe impl Sync for Lazy {} + +#[macro_export] +#[doc(hidden)] +macro_rules! 
__lazy_static_create { + ($NAME:ident, $T:ty) => { + use std::sync::ONCE_INIT; + static mut $NAME: $crate::lazy::Lazy<$T> = $crate::lazy::Lazy(0 as *const $T, ONCE_INIT); + } +} diff -Nru cargo-0.17.0/vendor/lazy_static-0.2.5/src/lib.rs cargo-0.19.0/vendor/lazy_static-0.2.5/src/lib.rs --- cargo-0.17.0/vendor/lazy_static-0.2.5/src/lib.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/lazy_static-0.2.5/src/lib.rs 2017-08-16 09:07:16.000000000 +0000 @@ -0,0 +1,210 @@ +/*! +A macro for declaring lazily evaluated statics. + +Using this macro, it is possible to have `static`s that require code to be +executed at runtime in order to be initialized. +This includes anything requiring heap allocations, like vectors or hash maps, +as well as anything that requires function calls to be computed. + +# Syntax + +```ignore +lazy_static! { + [pub] static ref NAME_1: TYPE_1 = EXPR_1; + [pub] static ref NAME_2: TYPE_2 = EXPR_2; + ... + [pub] static ref NAME_N: TYPE_N = EXPR_N; +} +``` + +Attributes (including doc comments) are supported as well: + +```rust +# #[macro_use] +# extern crate lazy_static; +# fn main() { +lazy_static! { + /// This is an example for using doc comment attributes + static ref EXAMPLE: u8 = 42; +} +# } +``` + +# Semantics + +For a given `static ref NAME: TYPE = EXPR;`, the macro generates a unique type that +implements `Deref` and stores it in a static with name `NAME`. (Attributes end up +attaching to this type.) + +On first deref, `EXPR` gets evaluated and stored internally, such that all further derefs +can return a reference to the same object. Note that this can lead to deadlocks +if you have multiple lazy statics that depend on each other in their initialization. + +Apart from the lazy initialization, the resulting "static ref" variables +have generally the same properties as regular "static" variables: + +- Any type in them needs to fulfill the `Sync` trait. +- If the type has a destructor, then it will not run when the process exits. 
+ +# Example + +Using the macro: + +```rust +#[macro_use] +extern crate lazy_static; + +use std::collections::HashMap; + +lazy_static! { + static ref HASHMAP: HashMap = { + let mut m = HashMap::new(); + m.insert(0, "foo"); + m.insert(1, "bar"); + m.insert(2, "baz"); + m + }; + static ref COUNT: usize = HASHMAP.len(); + static ref NUMBER: u32 = times_two(21); +} + +fn times_two(n: u32) -> u32 { n * 2 } + +fn main() { + println!("The map has {} entries.", *COUNT); + println!("The entry for `0` is \"{}\".", HASHMAP.get(&0).unwrap()); + println!("A expensive calculation on a static results in: {}.", *NUMBER); +} +``` + +# Implementation details + +The `Deref` implementation uses a hidden static variable that is guarded by a atomic check on each access. On stable Rust, the macro may need to allocate each static on the heap. + +*/ + +#![cfg_attr(feature="nightly", feature(const_fn, allow_internal_unstable, core_intrinsics))] + +#![no_std] + +#[cfg(not(feature="nightly"))] +#[doc(hidden)] +pub mod lazy; + +#[cfg(all(feature="nightly", not(feature="spin_no_std")))] +#[path="nightly_lazy.rs"] +#[doc(hidden)] +pub mod lazy; + +#[cfg(all(feature="nightly", feature="spin_no_std"))] +#[path="core_lazy.rs"] +#[doc(hidden)] +pub mod lazy; + +#[doc(hidden)] +pub use core::ops::Deref as __Deref; + +#[macro_export] +#[cfg_attr(feature="nightly", allow_internal_unstable)] +#[doc(hidden)] +macro_rules! 
__lazy_static_internal { + ($(#[$attr:meta])* static ref $N:ident : $T:ty = $e:expr; $($t:tt)*) => { + __lazy_static_internal!(@PRIV, $(#[$attr])* static ref $N : $T = $e; $($t)*); + }; + ($(#[$attr:meta])* pub static ref $N:ident : $T:ty = $e:expr; $($t:tt)*) => { + __lazy_static_internal!(@PUB, $(#[$attr])* static ref $N : $T = $e; $($t)*); + }; + (@$VIS:ident, $(#[$attr:meta])* static ref $N:ident : $T:ty = $e:expr; $($t:tt)*) => { + __lazy_static_internal!(@MAKE TY, $VIS, $(#[$attr])*, $N); + impl $crate::__Deref for $N { + type Target = $T; + #[allow(unsafe_code)] + fn deref<'a>(&'a self) -> &'a $T { + unsafe { + #[inline(always)] + fn __static_ref_initialize() -> $T { $e } + + #[inline(always)] + unsafe fn __stability() -> &'static $T { + __lazy_static_create!(LAZY, $T); + LAZY.get(__static_ref_initialize) + } + __stability() + } + } + } + impl $crate::LazyStatic for $N { + fn initialize(lazy: &Self) { + let _ = &**lazy; + } + } + __lazy_static_internal!($($t)*); + }; + (@MAKE TY, PUB, $(#[$attr:meta])*, $N:ident) => { + #[allow(missing_copy_implementations)] + #[allow(non_camel_case_types)] + #[allow(dead_code)] + $(#[$attr])* + pub struct $N {__private_field: ()} + #[doc(hidden)] + pub static $N: $N = $N {__private_field: ()}; + }; + (@MAKE TY, PRIV, $(#[$attr:meta])*, $N:ident) => { + #[allow(missing_copy_implementations)] + #[allow(non_camel_case_types)] + #[allow(dead_code)] + $(#[$attr])* + struct $N {__private_field: ()} + #[doc(hidden)] + static $N: $N = $N {__private_field: ()}; + }; + () => () +} + +#[macro_export] +#[cfg_attr(feature="nightly", allow_internal_unstable)] +macro_rules! 
lazy_static { + ($(#[$attr:meta])* static ref $N:ident : $T:ty = $e:expr; $($t:tt)*) => { + __lazy_static_internal!(@PRIV, $(#[$attr])* static ref $N : $T = $e; $($t)*); + }; + ($(#[$attr:meta])* pub static ref $N:ident : $T:ty = $e:expr; $($t:tt)*) => { + __lazy_static_internal!(@PUB, $(#[$attr])* static ref $N : $T = $e; $($t)*); + }; + () => () +} + +/// Support trait for enabling a few common operation on lazy static values. +/// +/// This is implemented by each defined lazy static, and +/// used by the free functions in this crate. +pub trait LazyStatic { + #[doc(hidden)] + fn initialize(lazy: &Self); +} + +/// Takes a shared reference to a lazy static and initializes +/// it if it has not been already. +/// +/// This can be used to control the initialization point of a lazy static. +/// +/// Example: +/// +/// ```rust +/// #[macro_use] +/// extern crate lazy_static; +/// +/// lazy_static! { +/// static ref BUFFER: Vec = (0..65537).collect(); +/// } +/// +/// fn main() { +/// lazy_static::initialize(&BUFFER); +/// +/// // ... 
+/// work_with_initialized_data(&BUFFER); +/// } +/// # fn work_with_initialized_data(_: &[u8]) {} +/// ``` +pub fn initialize(lazy: &T) { + LazyStatic::initialize(lazy); +} diff -Nru cargo-0.17.0/vendor/lazy_static-0.2.5/src/nightly_lazy.rs cargo-0.19.0/vendor/lazy_static-0.2.5/src/nightly_lazy.rs --- cargo-0.17.0/vendor/lazy_static-0.2.5/src/nightly_lazy.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/lazy_static-0.2.5/src/nightly_lazy.rs 2017-08-16 09:07:16.000000000 +0000 @@ -0,0 +1,41 @@ +extern crate std; + +use self::std::prelude::v1::*; +use self::std::cell::UnsafeCell; +use self::std::sync::{Once, ONCE_INIT}; + +pub struct Lazy(UnsafeCell>, Once); + +impl Lazy { + #[inline(always)] + pub const fn new() -> Self { + Lazy(UnsafeCell::new(None), ONCE_INIT) + } + + #[inline(always)] + pub fn get(&'static self, f: F) -> &T + where F: FnOnce() -> T + { + unsafe { + self.1.call_once(|| { + *self.0.get() = Some(f()); + }); + + match *self.0.get() { + Some(ref x) => x, + None => std::intrinsics::unreachable(), + } + } + } +} + +unsafe impl Sync for Lazy {} + +#[macro_export] +#[allow_internal_unstable] +#[doc(hidden)] +macro_rules! __lazy_static_create { + ($NAME:ident, $T:ty) => { + static $NAME: $crate::lazy::Lazy<$T> = $crate::lazy::Lazy::new(); + } +} diff -Nru cargo-0.17.0/vendor/lazy_static-0.2.5/tests/no_std.rs cargo-0.19.0/vendor/lazy_static-0.2.5/tests/no_std.rs --- cargo-0.17.0/vendor/lazy_static-0.2.5/tests/no_std.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/lazy_static-0.2.5/tests/no_std.rs 2017-08-16 09:07:16.000000000 +0000 @@ -0,0 +1,21 @@ +#![cfg(feature="spin_no_std")] +#![feature(const_fn)] + +#![no_std] + +#[macro_use] +extern crate lazy_static; + +lazy_static! { + /// Documentation! 
+ pub static ref NUMBER: u32 = times_two(3); +} + +fn times_two(n: u32) -> u32 { + n * 2 +} + +#[test] +fn test_basic() { + assert_eq!(*NUMBER, 6); +} diff -Nru cargo-0.17.0/vendor/lazy_static-0.2.5/tests/test.rs cargo-0.19.0/vendor/lazy_static-0.2.5/tests/test.rs --- cargo-0.17.0/vendor/lazy_static-0.2.5/tests/test.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/lazy_static-0.2.5/tests/test.rs 2017-08-16 09:07:16.000000000 +0000 @@ -0,0 +1,149 @@ +#![cfg_attr(feature="nightly", feature(const_fn))] + +#[macro_use] +extern crate lazy_static; +use std::collections::HashMap; + +lazy_static! { + /// Documentation! + pub static ref NUMBER: u32 = times_two(3); + + static ref ARRAY_BOXES: [Box; 3] = [Box::new(1), Box::new(2), Box::new(3)]; + + /// More documentation! + #[allow(unused_variables)] + #[derive(Copy, Clone, Debug)] + pub static ref STRING: String = "hello".to_string(); + + static ref HASHMAP: HashMap = { + let mut m = HashMap::new(); + m.insert(0, "abc"); + m.insert(1, "def"); + m.insert(2, "ghi"); + m + }; + + // This should not compile if the unsafe is removed. + static ref UNSAFE: u32 = unsafe { + std::mem::transmute::(-1) + }; + + // This *should* triggger warn(dead_code) by design. + static ref UNUSED: () = (); + +} + +lazy_static! { + static ref S1: &'static str = "a"; + static ref S2: &'static str = "b"; +} +lazy_static! 
{ + static ref S3: String = [*S1, *S2].join(""); +} + +#[test] +fn s3() { + assert_eq!(&*S3, "ab"); +} + +fn times_two(n: u32) -> u32 { + n * 2 +} + +#[test] +fn test_basic() { + assert_eq!(&**STRING, "hello"); + assert_eq!(*NUMBER, 6); + assert!(HASHMAP.get(&1).is_some()); + assert!(HASHMAP.get(&3).is_none()); + assert_eq!(&*ARRAY_BOXES, &[Box::new(1), Box::new(2), Box::new(3)]); + assert_eq!(*UNSAFE, std::u32::MAX); +} + +#[test] +fn test_repeat() { + assert_eq!(*NUMBER, 6); + assert_eq!(*NUMBER, 6); + assert_eq!(*NUMBER, 6); +} + +#[test] +fn test_meta() { + // this would not compile if STRING were not marked #[derive(Copy, Clone)] + let copy_of_string = STRING; + // just to make sure it was copied + assert!(&STRING as *const _ != ©_of_string as *const _); + + // this would not compile if STRING were not marked #[derive(Debug)] + assert_eq!(format!("{:?}", STRING), "STRING { __private_field: () }".to_string()); +} + +mod visibility { + lazy_static! { + pub static ref FOO: Box = Box::new(0); + static ref BAR: Box = Box::new(98); + } + + #[test] + fn sub_test() { + assert_eq!(**FOO, 0); + assert_eq!(**BAR, 98); + } +} + +#[test] +fn test_visibility() { + assert_eq!(*visibility::FOO, Box::new(0)); +} + +// This should not cause a warning about a missing Copy implementation +lazy_static! { + pub static ref VAR: i32 = { 0 }; +} + +#[derive(Copy, Clone, Debug, PartialEq)] +struct X; +struct Once(X); +const ONCE_INIT: Once = Once(X); +static DATA: X = X; +static ONCE: X = X; +fn require_sync() -> X { X } +fn transmute() -> X { X } +fn __static_ref_initialize() -> X { X } +fn test(_: Vec) -> X { X } + +// All these names should not be shadowed +lazy_static! 
{ + static ref ITEM_NAME_TEST: X = { + test(vec![X, Once(X).0, ONCE_INIT.0, DATA, ONCE, + require_sync(), transmute(), + // Except this, which will sadly be shadowed by internals: + // __static_ref_initialize() + ]) + }; +} + +#[test] +fn item_name_shadowing() { + assert_eq!(*ITEM_NAME_TEST, X); +} + +use std::sync::atomic::AtomicBool; +use std::sync::atomic::ATOMIC_BOOL_INIT; +use std::sync::atomic::Ordering::SeqCst; + +static PRE_INIT_FLAG: AtomicBool = ATOMIC_BOOL_INIT; + +lazy_static! { + static ref PRE_INIT: () = { + PRE_INIT_FLAG.store(true, SeqCst); + () + }; +} + +#[test] +fn pre_init() { + assert_eq!(PRE_INIT_FLAG.load(SeqCst), false); + lazy_static::initialize(&PRE_INIT); + assert_eq!(PRE_INIT_FLAG.load(SeqCst), true); +} diff -Nru cargo-0.17.0/vendor/lazy_static-0.2.5/.travis.yml cargo-0.19.0/vendor/lazy_static-0.2.5/.travis.yml --- cargo-0.17.0/vendor/lazy_static-0.2.5/.travis.yml 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/lazy_static-0.2.5/.travis.yml 2017-08-16 09:07:16.000000000 +0000 @@ -0,0 +1,24 @@ +language: rust +rust: +- nightly +- beta +- stable +before_script: +- | + pip install 'travis-cargo<0.2' --user && + export PATH=$HOME/.local/bin:$PATH +script: +- | + travis-cargo build && + travis-cargo test && + travis-cargo bench && + travis-cargo --only nightly build -- --features spin_no_std && + travis-cargo --only nightly test -- --features spin_no_std && + travis-cargo --only nightly bench -- --features spin_no_std && + travis-cargo --only stable doc +after_success: +- travis-cargo --only stable doc-upload +env: + global: + - TRAVIS_CARGO_NIGHTLY_FEATURE=nightly + - secure: YXu24LptjeYirjWYjWGsMT2m3mB7LvQATE6TVo7VEUXv8GYoy2ORIHD83PeImxC93MmZ01QeUezRzuCW51ZcK92VnNSBttlF60SvIX18VsJrV92tsAhievFstqYQ+fB8DIuQ8noU0jPz7GpI+R9dlTRSImAqWOnVIghA+Wzz7Js= diff -Nru cargo-0.17.0/vendor/libc-0.2.18/appveyor.yml cargo-0.19.0/vendor/libc-0.2.18/appveyor.yml --- cargo-0.17.0/vendor/libc-0.2.18/appveyor.yml 2017-03-24 16:59:43.000000000 +0000 
+++ cargo-0.19.0/vendor/libc-0.2.18/appveyor.yml 1970-01-01 00:00:00.000000000 +0000 @@ -1,25 +0,0 @@ -environment: - matrix: - - TARGET: x86_64-pc-windows-gnu - MSYS2_BITS: 64 - - TARGET: i686-pc-windows-gnu - MSYS2_BITS: 32 - - TARGET: x86_64-pc-windows-msvc - - TARGET: i686-pc-windows-msvc -install: - - curl -sSf -o rustup-init.exe https://win.rustup.rs/ - - rustup-init.exe -y --default-host %TARGET% - - set PATH=%PATH%;C:\Users\appveyor\.cargo\bin - - if defined MSYS2_BITS set PATH=%PATH%;C:\msys64\mingw%MSYS2_BITS%\bin - - rustc -V - - cargo -V - -build: false - -test_script: - - cargo test --target %TARGET% - - cargo run --manifest-path libc-test/Cargo.toml --target %TARGET% - -cache: - - target - - C:\Users\appveyor\.cargo\registry diff -Nru cargo-0.17.0/vendor/libc-0.2.18/.cargo-checksum.json cargo-0.19.0/vendor/libc-0.2.18/.cargo-checksum.json --- cargo-0.17.0/vendor/libc-0.2.18/.cargo-checksum.json 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/libc-0.2.18/.cargo-checksum.json 1970-01-01 00:00:00.000000000 +0000 @@ -1 +0,0 @@ 
-{"files":{".cargo-ok":"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",".gitignore":"7150ee9391a955b2ef7e0762fc61c0c1aab167620ca36d88d78062d93b8334ba",".travis.yml":"7cdd02047a3044fcc50a43aacede564cfbe061bab9ccd143a58e7e92e64750c2","Cargo.toml":"a6a896942913853a04393a52b33516140a6a173768fff34920f2d304fe4bb21f","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"6485b8ed310d3f0340bf1ad1f47645069ce4069dcc6bb46c7d5c6faf41de1fdb","README.md":"4222225ac1d974faee08172b0b0773dfe2b312a13054f090f04c651aa1d1e6ef","appveyor.yml":"c0d70c650b6231e6ff78a352224f1a522a9be69d9da4251adbaddb3f0393294d","ci/README.md":"be804f15e2128e5fd4b160cb0b13cff5f19e7d77b55ec5254aa6fd8731c84f0d","ci/docker/aarch64-unknown-linux-gnu/Dockerfile":"62ca7317439f9c303990e897450a91cd467be05eb75dfc01456d417932ac8672","ci/docker/arm-linux-androideabi/Dockerfile":"172bac5a76024737847ffdac49f68e2b3d890cb2fc1b5e3f7aaaf19b46916830","ci/docker/arm-linux-androideabi/accept-licenses.sh":"84ad00815f628005ed22c5d6cd14990ebc97812a7163bd275b2877904eddab53","ci/docker/arm-linux-androideabi/install-ndk.sh":"eef063bb01a16c0f90471dbce1b5a395b53141d7704e15a3c9a1c4fc5e06d4b1","ci/docker/arm-linux-androideabi/install-sdk.sh":"42c04b17c4a35bef58757332e960a6e4aba1b5e41f8fc0182265163ff93f6182","ci/docker/arm-unknown-linux-gnueabihf/Dockerfile":"e349f7caa463adbde8d6ec4d2b9f7720ed81c77f48d75bbfb78c89751f55c2dc","ci/docker/i686-unknown-linux-gnu/Dockerfile":"07e9df6ba91025cbec7ae81ade63f8cfb8a54c5e1e5a8f8def0617e17bd59db0","ci/docker/i686-unknown-linux-musl/Dockerfile":"1a4d064adff4a8f58773305567cfe5d915bcd0762bcb0e101cf6f4ca628a96da","ci/docker/mips-unknown-linux-gnu/Dockerfile":"860299d96ee50ebdbd788e65eb6ba1f561ef66107647bddffcb2567ac350896b","ci/docker/mips-unknown-linux-musl/Dockerfile":"711c43122fa34cee83a69944493213924b0ff1fccd78c7a141cb2b2127526484","ci/docker/mips64-unknown-linux-gnuabi64/Dockerfile":"163776e0fd38f66df7415421202ac29efc7d345a628947434e573c3
885594ab5","ci/docker/mipsel-unknown-linux-musl/Dockerfile":"398c1c6810b07d329f6f29523b3e34603d24939fb6f6a944bcd67d36f25b08c8","ci/docker/powerpc-unknown-linux-gnu/Dockerfile":"08b846a338c2ee70100f4e80db812668dc58bfb536c44a95cd1cf004d965186b","ci/docker/powerpc64-unknown-linux-gnu/Dockerfile":"4da285ffd035d16f5da9e3701841eb86049c8cfa417fa81e53da4ef74152eac0","ci/docker/x86_64-rumprun-netbsd/Dockerfile":"44c3107fb30380785aaed6ff73fa334017a5bb4e3b5c7d4876154f09023a2b99","ci/docker/x86_64-unknown-freebsd/Dockerfile":"ef0f9f63065218728d2daafaa5ba71b17e4ccc23d72e859e0a7133fc64c0815e","ci/docker/x86_64-unknown-linux-gnu/Dockerfile":"67fabbc8c6ac02376cf9344251ad49ecdac396b71accb572fd1ae65225325bc0","ci/docker/x86_64-unknown-linux-musl/Dockerfile":"f71019fed5204b950843ef5e56144161fda7e27fad68ed0e8bc4353c388c7bcf","ci/docker/x86_64-unknown-openbsd/Dockerfile":"dfa5c23a6cff8c7a9a846668118c71a8406a360801fd3632fb12e8fbda6b7338","ci/dox.sh":"2161cb17ee0d6a2279a64149c6b7c73a5b2eab344f248ea1fa0e6c8f6335ec5f","ci/landing-page-footer.html":"b70b3112c2147f5c967e7481061ef38bc2d79a28dd55a16fb916d9c9426da2c4","ci/landing-page-head.html":"ad69663fac7924f27d0209bc519d55838e86edfc4133713a6fd08caadac1b142","ci/run-docker.sh":"7f6c68dbca93788111170ac4678608957a179e76cfe8c5a51d11dfea1742d7f2","ci/run-qemu.sh":"bb859421170871ef23a8940c5e150efec0c01b95e32d2ce2d37b79a45d9d346c","ci/run.sh":"b6a6307ea989b6a84bad0fd7d46c3206b30f7aa06d0b3e92d6cb1c855f4e0c42","ci/style.rs":"60564abc1d5197ed1598426dd0d6ee9939a16d2875b03373538f58843bb616c4","src/dox.rs":"eb6fbcc0b8b59430271bb71ee023961fd165337fc5fd6ca433882457a3c735bd","src/lib.rs":"cc328e10a4c2879e7de1858aabc30e667cb07398e021e0d2d002f4195632127a","src/macros.rs":"bd9802772b0e5c8b3c550d1c24307f06c0d1e4ce656b4ae1cf092142bbe5412c","src/redox.rs":"e4814435c11a200a8acd9ad0e40dac3e44a6881f434d89b91f1ba643c8b9b6f7","src/unix/bsd/apple/b32.rs":"110ecff78da0e8d405d861447904da403d8b3f6da1f0f9dc9987633f3f04fe46","src/unix/bsd/apple/b64.rs":"e6808081c0b276cca318
9628716f507c7c0d00b62417cd44addbdaefe848cec7","src/unix/bsd/apple/mod.rs":"25aa17657431107ceecdc1f87bcfdeb5df633eeae9ae85be39187c3a6cbecb0f","src/unix/bsd/freebsdlike/dragonfly/mod.rs":"53ff7f64e03d7aa3d779d4be0768e72fdf513689aa28813bd279a6a8fcbf824a","src/unix/bsd/freebsdlike/freebsd/mod.rs":"8670fd8cae2bef1f5f41b11e7723174d7c64fb73302893173010ee024915b4b9","src/unix/bsd/freebsdlike/freebsd/x86.rs":"54311d3ebf2bb091ab22361e377e6ef9224aec2ecfe459fbfcedde4932db9c58","src/unix/bsd/freebsdlike/freebsd/x86_64.rs":"c7f46b9ae23fde5a9e245a28ed1380066e67f081323b4d253a18e9da3b97b860","src/unix/bsd/freebsdlike/mod.rs":"cc13083464a040ae490541835f708b9c0546833335a7c891939c495ca78eb41f","src/unix/bsd/mod.rs":"bd422d4bca87a3e8ea4bd78b9ae019643399807d036913f42fdd7476f260297d","src/unix/bsd/netbsdlike/mod.rs":"7b62b89c6ba0d5a8e0cf0937587a81e0314f9c5dabb0c9a9164106b677cf4dd8","src/unix/bsd/netbsdlike/netbsd/mod.rs":"6f4535b27610c1f14e781413e8ea20713fa79788addebf96f5e8f3d4b0767c2c","src/unix/bsd/netbsdlike/netbsd/other/b32/mod.rs":"bd251a102bed65d5cb3459275f6ec3310fe5803ff4c9651212115548f86256d0","src/unix/bsd/netbsdlike/netbsd/other/b64/mod.rs":"927eeccaf3269d299db4c2a55f8010807bf43dfa894aea6a783215f5d3560baa","src/unix/bsd/netbsdlike/netbsd/other/mod.rs":"8ce39030f3e4fb45a3d676ade97da8f6d1b3d5f6d8d141224d341c993c57e090","src/unix/bsd/netbsdlike/openbsdlike/bitrig.rs":"f8cd05dacd3a3136c58da5a2fbe26f703767823b28e74fe8a2b57a7bd98d6d5c","src/unix/bsd/netbsdlike/openbsdlike/mod.rs":"b61a9ad0bf901b2aed86f69a062a70b50a5b29251165d9b62406c09c9d1830b4","src/unix/bsd/netbsdlike/openbsdlike/openbsd.rs":"b1b9cf7be9f0e4d294a57092594074ad03a65fe0eeac9d1104fa874c313e7900","src/unix/bsd/netbsdlike/openbsdlike/other/b32/mod.rs":"bd251a102bed65d5cb3459275f6ec3310fe5803ff4c9651212115548f86256d0","src/unix/bsd/netbsdlike/openbsdlike/other/b64/mod.rs":"927eeccaf3269d299db4c2a55f8010807bf43dfa894aea6a783215f5d3560baa","src/unix/bsd/netbsdlike/openbsdlike/other/mod.rs":"f5d8db6f54efd05520b31b764a6bacbf612
e1aebce097d2d5bfaaef3b91f37b5","src/unix/haiku/b32.rs":"bd251a102bed65d5cb3459275f6ec3310fe5803ff4c9651212115548f86256d0","src/unix/haiku/b64.rs":"b422430c550c0ba833c9206d1350861e344e3a2eb33d7d58693efb35044be1cc","src/unix/haiku/mod.rs":"d14c45d536f24cd9cd8d5170b9829026da4c782ff2d5855644cc217553e309cf","src/unix/mod.rs":"84cf9ca2f24b1331760a5d51ba7f6bf3c1e170f6642ef9b6d25deb61a50ddcaf","src/unix/notbsd/android/b32.rs":"148e1b4ed8b4f700d5aa24178af925164176e1c18b54db877ced4b55ba9f03d4","src/unix/notbsd/android/b64.rs":"302caf0aa95fa022030717c58de17d85d814b04350eca081a722ec435bc4f217","src/unix/notbsd/android/mod.rs":"49bdee8558e8abbcc9cf1fdab3ba99bc900f3c8fd7744e375d744adefd83090a","src/unix/notbsd/linux/mips/mips32.rs":"6f946d75deed2b4de65927eaf48c98be0d72bc6d932ff5f1c1542db558ec79f2","src/unix/notbsd/linux/mips/mips64.rs":"44ded650aa3e4b251ac5b09a460400c07068e70e8daeb0815932c2f828aa8ec4","src/unix/notbsd/linux/mips/mod.rs":"478646b58921d24fa0ef487a9f0f819981bb5f0bfe5471d6d866366cd890458e","src/unix/notbsd/linux/mod.rs":"ddf67bb80c8f42cad12fa64e92d9a82ab9e90a10e62f041cc1610ae175a9d680","src/unix/notbsd/linux/musl/b32/arm.rs":"0ad8c97458743dc7d81200df0e1223f0a20936933ace77fe786477027597dd7b","src/unix/notbsd/linux/musl/b32/asmjs.rs":"085e410f990312de76f74cb9bbf9fcc27d686e94334143b34511f565d1b8bb91","src/unix/notbsd/linux/musl/b32/mips.rs":"01a92b5dc28ca67e41d7791e398f75210208368e53a848297186de5829d158ec","src/unix/notbsd/linux/musl/b32/mod.rs":"bd29a02c67b69791e7cabd7666503c35ed5322d244a005b9cc7fd0cb28b552a8","src/unix/notbsd/linux/musl/b32/x86.rs":"b47963c6bc5fceec96fded47d58e1c0466b190ec7ae207d666d23be35131c638","src/unix/notbsd/linux/musl/b64/aarch64.rs":"4009c7eaf703472daef2a70bdac910d9fc395a33689ef2e8cf1c4e692445d3f0","src/unix/notbsd/linux/musl/b64/mod.rs":"ec5ec1b11a2f4db24f7c0fadc229eab7c039c5b5b2988bd8eff7a424ef8ef6e9","src/unix/notbsd/linux/musl/b64/powerpc64.rs":"dc28f5b7284235d6cf5519053cac59a1c16dc39223b71cca0871e4880755f852","src/unix/notbsd/linux/musl/b
64/x86_64.rs":"43291acc0dfc92c2fec8ba6ce77ee9ca3c20bcdccec18e149f95ba911cee704b","src/unix/notbsd/linux/musl/mod.rs":"423be2ded304c850dc6048c675204e2f8aaf086840d6c154669d7771d57a640e","src/unix/notbsd/linux/other/b32/arm.rs":"f5cb989075fa3b5f997e7101495532c8d5c9f3577412d4c07e4c8c1a16f7b43c","src/unix/notbsd/linux/other/b32/mod.rs":"8b774feb5510b963ed031db7ab3d7e24f1ba5524a6396db0b851d237ccc16fd3","src/unix/notbsd/linux/other/b32/powerpc.rs":"3b62052bb9741afa5349098e6e9c675b60e822e41fed6b5e1b694be1872097b1","src/unix/notbsd/linux/other/b32/x86.rs":"1eda37736f5966c7968b594f74f5018f56b6b8c67bbdeb31fc3db1b6e4ac31b4","src/unix/notbsd/linux/other/b64/aarch64.rs":"a978e82d037a9c8127b2f704323864aff42ac910e721ecc69c255671ca96b950","src/unix/notbsd/linux/other/b64/mod.rs":"efb7740c2fb925ea98977a6a3ff52bc0b72205c1f88a9ba281a939b66b7f0efe","src/unix/notbsd/linux/other/b64/powerpc64.rs":"06a795bca8e91a0143ef1787b034201ed7a21d01960ce9fe869d18c274d5bdb4","src/unix/notbsd/linux/other/b64/x86_64.rs":"0ed128e93f212c0d65660bd95e29190a2dae7c9d15d6fa0d3c4c6656f89e9bdc","src/unix/notbsd/linux/other/mod.rs":"8cf1a781c728da79f9087fa36008151de7af24fd596fdf2d34ffbf64885a1dde","src/unix/notbsd/linux/s390x.rs":"222778cf3e7d1c81e49cbbe9f89477178ae3deedf60e2a485d00f46ab331ea5c","src/unix/notbsd/mod.rs":"8a74a8d5cddbe94cf8fe234ed39a2d989db08cb1c9af70dbf40d87956825b542","src/unix/solaris/mod.rs":"f72395ac3e67798d51433a654e1741f252a99ac7fc4f577168fb4465724c68d9","src/windows.rs":"acccbd341e81206cb1dc66af41762c193ac0dd719d700b64f7e26c967ee7d670"},"package":"a51822fc847e7a8101514d1d44e354ba2ffa7d4c194dcab48870740e327cac70"} \ No newline at end of file diff -Nru cargo-0.17.0/vendor/libc-0.2.18/Cargo.toml cargo-0.19.0/vendor/libc-0.2.18/Cargo.toml --- cargo-0.17.0/vendor/libc-0.2.18/Cargo.toml 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/libc-0.2.18/Cargo.toml 1970-01-01 00:00:00.000000000 +0000 @@ -1,21 +0,0 @@ -[package] - -name = "libc" -version = "0.2.18" -authors = ["The Rust 
Project Developers"] -license = "MIT/Apache-2.0" -readme = "README.md" -repository = "https://github.com/rust-lang/libc" -homepage = "https://github.com/rust-lang/libc" -documentation = "http://doc.rust-lang.org/libc" -description = """ -A library for types and bindings to native C functions often found in libc or -other common platform libraries. -""" - -[features] -default = ["use_std"] -use_std = [] - -[workspace] -members = ["libc-test", "libc-test/generate-files"] diff -Nru cargo-0.17.0/vendor/libc-0.2.18/ci/docker/aarch64-unknown-linux-gnu/Dockerfile cargo-0.19.0/vendor/libc-0.2.18/ci/docker/aarch64-unknown-linux-gnu/Dockerfile --- cargo-0.17.0/vendor/libc-0.2.18/ci/docker/aarch64-unknown-linux-gnu/Dockerfile 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/libc-0.2.18/ci/docker/aarch64-unknown-linux-gnu/Dockerfile 1970-01-01 00:00:00.000000000 +0000 @@ -1,7 +0,0 @@ -FROM ubuntu:16.10 -RUN apt-get update -RUN apt-get install -y --no-install-recommends \ - gcc libc6-dev ca-certificates \ - gcc-aarch64-linux-gnu libc6-dev-arm64-cross qemu-user -ENV CARGO_TARGET_AARCH64_UNKNOWN_LINUX_GNU_LINKER=aarch64-linux-gnu-gcc \ - PATH=$PATH:/rust/bin diff -Nru cargo-0.17.0/vendor/libc-0.2.18/ci/docker/arm-linux-androideabi/accept-licenses.sh cargo-0.19.0/vendor/libc-0.2.18/ci/docker/arm-linux-androideabi/accept-licenses.sh --- cargo-0.17.0/vendor/libc-0.2.18/ci/docker/arm-linux-androideabi/accept-licenses.sh 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/libc-0.2.18/ci/docker/arm-linux-androideabi/accept-licenses.sh 1970-01-01 00:00:00.000000000 +0000 @@ -1,15 +0,0 @@ -#!/usr/bin/expect -f -# ignore-license - -set timeout 1800 -set cmd [lindex $argv 0] -set licenses [lindex $argv 1] - -spawn {*}$cmd -expect { - "Do you accept the license '*'*" { - exp_send "y\r" - exp_continue - } - eof -} diff -Nru cargo-0.17.0/vendor/libc-0.2.18/ci/docker/arm-linux-androideabi/Dockerfile cargo-0.19.0/vendor/libc-0.2.18/ci/docker/arm-linux-androideabi/Dockerfile 
--- cargo-0.17.0/vendor/libc-0.2.18/ci/docker/arm-linux-androideabi/Dockerfile 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/libc-0.2.18/ci/docker/arm-linux-androideabi/Dockerfile 1970-01-01 00:00:00.000000000 +0000 @@ -1,34 +0,0 @@ -FROM ubuntu:16.04 - -RUN dpkg --add-architecture i386 && \ - apt-get update && \ - apt-get install -y --no-install-recommends \ - file \ - curl \ - ca-certificates \ - python \ - unzip \ - expect \ - openjdk-9-jre \ - libstdc++6:i386 \ - gcc \ - libc6-dev - -WORKDIR /android/ - -COPY install-ndk.sh /android/ -RUN sh /android/install-ndk.sh - -ENV PATH=$PATH:/android/ndk-arm/bin:/android/sdk/tools:/android/sdk/platform-tools - -COPY install-sdk.sh accept-licenses.sh /android/ -RUN sh /android/install-sdk.sh - -ENV PATH=$PATH:/rust/bin \ - CARGO_TARGET_ARM_LINUX_ANDROIDEABI_LINKER=arm-linux-androideabi-gcc \ - ANDROID_EMULATOR_FORCE_32BIT=1 \ - HOME=/tmp -RUN chmod 755 /android/sdk/tools/* - -RUN cp -r /root/.android /tmp -RUN chmod 777 -R /tmp/.android diff -Nru cargo-0.17.0/vendor/libc-0.2.18/ci/docker/arm-linux-androideabi/install-ndk.sh cargo-0.19.0/vendor/libc-0.2.18/ci/docker/arm-linux-androideabi/install-ndk.sh --- cargo-0.17.0/vendor/libc-0.2.18/ci/docker/arm-linux-androideabi/install-ndk.sh 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/libc-0.2.18/ci/docker/arm-linux-androideabi/install-ndk.sh 1970-01-01 00:00:00.000000000 +0000 @@ -1,21 +0,0 @@ -#!/bin/sh -# Copyright 2016 The Rust Project Developers. See the COPYRIGHT -# file at the top-level directory of this distribution and at -# http://rust-lang.org/COPYRIGHT. -# -# Licensed under the Apache License, Version 2.0 or the MIT license -# , at your -# option. This file may not be copied, modified, or distributed -# except according to those terms. 
- -set -ex - -curl -O https://dl.google.com/android/repository/android-ndk-r13b-linux-x86_64.zip -unzip -q android-ndk-r13b-linux-x86_64.zip -android-ndk-r13b/build/tools/make_standalone_toolchain.py \ - --install-dir /android/ndk-arm \ - --arch arm \ - --api 24 - -rm -rf ./android-ndk-r13b-linux-x86_64.zip ./android-ndk-r13b diff -Nru cargo-0.17.0/vendor/libc-0.2.18/ci/docker/arm-linux-androideabi/install-sdk.sh cargo-0.19.0/vendor/libc-0.2.18/ci/docker/arm-linux-androideabi/install-sdk.sh --- cargo-0.17.0/vendor/libc-0.2.18/ci/docker/arm-linux-androideabi/install-sdk.sh 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/libc-0.2.18/ci/docker/arm-linux-androideabi/install-sdk.sh 1970-01-01 00:00:00.000000000 +0000 @@ -1,33 +0,0 @@ -#!/bin/sh -# Copyright 2016 The Rust Project Developers. See the COPYRIGHT -# file at the top-level directory of this distribution and at -# http://rust-lang.org/COPYRIGHT. -# -# Licensed under the Apache License, Version 2.0 or the MIT license -# , at your -# option. This file may not be copied, modified, or distributed -# except according to those terms. - -set -ex - -# Prep the SDK and emulator -# -# Note that the update process requires that we accept a bunch of licenses, and -# we can't just pipe `yes` into it for some reason, so we take the same strategy -# located in https://github.com/appunite/docker by just wrapping it in a script -# which apparently magically accepts the licenses. 
- -mkdir sdk -curl https://dl.google.com/android/android-sdk_r24.4.1-linux.tgz | \ - tar xzf - -C sdk --strip-components=1 - -filter="platform-tools,android-21" -filter="$filter,sys-img-armeabi-v7a-android-21" - -./accept-licenses.sh "android - update sdk -a --no-ui --filter $filter" - -echo "no" | android create avd \ - --name arm-21 \ - --target android-21 \ - --abi armeabi-v7a diff -Nru cargo-0.17.0/vendor/libc-0.2.18/ci/docker/arm-unknown-linux-gnueabihf/Dockerfile cargo-0.19.0/vendor/libc-0.2.18/ci/docker/arm-unknown-linux-gnueabihf/Dockerfile --- cargo-0.17.0/vendor/libc-0.2.18/ci/docker/arm-unknown-linux-gnueabihf/Dockerfile 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/libc-0.2.18/ci/docker/arm-unknown-linux-gnueabihf/Dockerfile 1970-01-01 00:00:00.000000000 +0000 @@ -1,7 +0,0 @@ -FROM ubuntu:16.10 -RUN apt-get update -RUN apt-get install -y --no-install-recommends \ - gcc libc6-dev ca-certificates \ - gcc-arm-linux-gnueabihf libc6-dev-armhf-cross qemu-user -ENV CARGO_TARGET_ARM_UNKNOWN_LINUX_GNUEABIHF_LINKER=arm-linux-gnueabihf-gcc \ - PATH=$PATH:/rust/bin diff -Nru cargo-0.17.0/vendor/libc-0.2.18/ci/docker/i686-unknown-linux-gnu/Dockerfile cargo-0.19.0/vendor/libc-0.2.18/ci/docker/i686-unknown-linux-gnu/Dockerfile --- cargo-0.17.0/vendor/libc-0.2.18/ci/docker/i686-unknown-linux-gnu/Dockerfile 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/libc-0.2.18/ci/docker/i686-unknown-linux-gnu/Dockerfile 1970-01-01 00:00:00.000000000 +0000 @@ -1,5 +0,0 @@ -FROM ubuntu:16.10 -RUN apt-get update -RUN apt-get install -y --no-install-recommends \ - gcc-multilib libc6-dev ca-certificates -ENV PATH=$PATH:/rust/bin diff -Nru cargo-0.17.0/vendor/libc-0.2.18/ci/docker/i686-unknown-linux-musl/Dockerfile cargo-0.19.0/vendor/libc-0.2.18/ci/docker/i686-unknown-linux-musl/Dockerfile --- cargo-0.17.0/vendor/libc-0.2.18/ci/docker/i686-unknown-linux-musl/Dockerfile 2017-03-24 16:59:43.000000000 +0000 +++ 
cargo-0.19.0/vendor/libc-0.2.18/ci/docker/i686-unknown-linux-musl/Dockerfile 1970-01-01 00:00:00.000000000 +0000 @@ -1,22 +0,0 @@ -FROM ubuntu:16.10 - -RUN apt-get update -RUN apt-get install -y --no-install-recommends \ - gcc make libc6-dev git curl ca-certificates -# Below we're cross-compiling musl for i686 using the system compiler on an -# x86_64 system. This is an awkward thing to be doing and so we have to jump -# through a couple hoops to get musl to be happy. In particular: -# -# * We specifically pass -m32 in CFLAGS and override CC when running ./configure, -# since otherwise the script will fail to find a compiler. -# * We manually unset CROSS_COMPILE when running make; otherwise the makefile -# will call the non-existent binary 'i686-ar'. -RUN curl https://www.musl-libc.org/releases/musl-1.1.15.tar.gz | \ - tar xzf - && \ - cd musl-1.1.15 && \ - CC=gcc CFLAGS=-m32 ./configure --prefix=/musl-i686 --disable-shared --target=i686 && \ - make CROSS_COMPILE= install -j4 && \ - cd .. 
&& \ - rm -rf musl-1.1.15 -ENV PATH=$PATH:/musl-i686/bin:/rust/bin \ - CC_i686_unknown_linux_musl=musl-gcc diff -Nru cargo-0.17.0/vendor/libc-0.2.18/ci/docker/mips64-unknown-linux-gnuabi64/Dockerfile cargo-0.19.0/vendor/libc-0.2.18/ci/docker/mips64-unknown-linux-gnuabi64/Dockerfile --- cargo-0.17.0/vendor/libc-0.2.18/ci/docker/mips64-unknown-linux-gnuabi64/Dockerfile 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/libc-0.2.18/ci/docker/mips64-unknown-linux-gnuabi64/Dockerfile 1970-01-01 00:00:00.000000000 +0000 @@ -1,11 +0,0 @@ -FROM ubuntu:16.10 - -RUN apt-get update -RUN apt-get install -y --no-install-recommends \ - gcc libc6-dev qemu-user ca-certificates \ - gcc-mips64-linux-gnuabi64 libc6-dev-mips64-cross \ - qemu-system-mips64 - -ENV CARGO_TARGET_MIPS64_UNKNOWN_LINUX_GNUABI64_LINKER=mips64-linux-gnuabi64-gcc \ - CC_mips64_unknown_linux_gnuabi64=mips64-linux-gnuabi64-gcc \ - PATH=$PATH:/rust/bin diff -Nru cargo-0.17.0/vendor/libc-0.2.18/ci/docker/mipsel-unknown-linux-musl/Dockerfile cargo-0.19.0/vendor/libc-0.2.18/ci/docker/mipsel-unknown-linux-musl/Dockerfile --- cargo-0.17.0/vendor/libc-0.2.18/ci/docker/mipsel-unknown-linux-musl/Dockerfile 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/libc-0.2.18/ci/docker/mipsel-unknown-linux-musl/Dockerfile 1970-01-01 00:00:00.000000000 +0000 @@ -1,17 +0,0 @@ -FROM ubuntu:16.10 - -RUN apt-get update -RUN apt-get install -y --no-install-recommends \ - gcc libc6-dev qemu-user ca-certificates qemu-system-mips curl \ - bzip2 - -RUN mkdir /toolchain - -# Note that this originally came from: -# https://downloads.openwrt.org/snapshots/trunk/ar71xx/generic/OpenWrt-SDK-ar71xx-generic_gcc-5.3.0_musl-1.1.15.Linux-x86_64.tar.bz2 -RUN curl -L https://s3.amazonaws.com/rust-lang-ci/libc/OpenWrt-SDK-ar71xx-generic_gcc-5.3.0_musl-1.1.15.Linux-x86_64.tar.bz2 | \ - tar xjf - -C /toolchain --strip-components=1 - -ENV PATH=$PATH:/rust/bin:/toolchain/bin \ - CC_mipsel_unknown_linux_musl=mipsel-openwrt-linux-gcc \ - 
CARGO_TARGET_MIPSEL_UNKNOWN_LINUX_MUSL_LINKER=mipsel-openwrt-linux-gcc diff -Nru cargo-0.17.0/vendor/libc-0.2.18/ci/docker/mips-unknown-linux-gnu/Dockerfile cargo-0.19.0/vendor/libc-0.2.18/ci/docker/mips-unknown-linux-gnu/Dockerfile --- cargo-0.17.0/vendor/libc-0.2.18/ci/docker/mips-unknown-linux-gnu/Dockerfile 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/libc-0.2.18/ci/docker/mips-unknown-linux-gnu/Dockerfile 1970-01-01 00:00:00.000000000 +0000 @@ -1,10 +0,0 @@ -FROM ubuntu:16.10 - -RUN apt-get update -RUN apt-get install -y --no-install-recommends \ - gcc libc6-dev qemu-user ca-certificates \ - gcc-mips-linux-gnu libc6-dev-mips-cross \ - qemu-system-mips - -ENV CARGO_TARGET_MIPS_UNKNOWN_LINUX_GNU_LINKER=mips-linux-gnu-gcc \ - PATH=$PATH:/rust/bin diff -Nru cargo-0.17.0/vendor/libc-0.2.18/ci/docker/mips-unknown-linux-musl/Dockerfile cargo-0.19.0/vendor/libc-0.2.18/ci/docker/mips-unknown-linux-musl/Dockerfile --- cargo-0.17.0/vendor/libc-0.2.18/ci/docker/mips-unknown-linux-musl/Dockerfile 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/libc-0.2.18/ci/docker/mips-unknown-linux-musl/Dockerfile 1970-01-01 00:00:00.000000000 +0000 @@ -1,17 +0,0 @@ -FROM ubuntu:16.10 - -RUN apt-get update -RUN apt-get install -y --no-install-recommends \ - gcc libc6-dev qemu-user ca-certificates qemu-system-mips curl \ - bzip2 - -RUN mkdir /toolchain - -# Note that this originally came from: -# https://downloads.openwrt.org/snapshots/trunk/ar71xx/generic/OpenWrt-SDK-ar71xx-generic_gcc-5.3.0_musl-1.1.15.Linux-x86_64.tar.bz2 -RUN curl -L https://s3.amazonaws.com/rust-lang-ci/libc/OpenWrt-SDK-ar71xx-generic_gcc-5.3.0_musl-1.1.15.Linux-x86_64.tar.bz2 | \ - tar xjf - -C /toolchain --strip-components=1 - -ENV PATH=$PATH:/rust/bin:/toolchain/staging_dir/toolchain-mips_34kc_gcc-5.3.0_musl-1.1.15/bin \ - CC_mips_unknown_linux_musl=mips-openwrt-linux-gcc \ - CARGO_TARGET_MIPS_UNKNOWN_LINUX_MUSL_LINKER=mips-openwrt-linux-gcc diff -Nru 
cargo-0.17.0/vendor/libc-0.2.18/ci/docker/powerpc64-unknown-linux-gnu/Dockerfile cargo-0.19.0/vendor/libc-0.2.18/ci/docker/powerpc64-unknown-linux-gnu/Dockerfile --- cargo-0.17.0/vendor/libc-0.2.18/ci/docker/powerpc64-unknown-linux-gnu/Dockerfile 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/libc-0.2.18/ci/docker/powerpc64-unknown-linux-gnu/Dockerfile 1970-01-01 00:00:00.000000000 +0000 @@ -1,11 +0,0 @@ -FROM ubuntu:16.10 - -RUN apt-get update -RUN apt-get install -y --no-install-recommends \ - gcc libc6-dev qemu-user ca-certificates \ - gcc-powerpc64-linux-gnu libc6-dev-ppc64-cross \ - qemu-system-ppc - -ENV CARGO_TARGET_POWERPC64_UNKNOWN_LINUX_GNU_LINKER=powerpc64-linux-gnu-gcc \ - CC=powerpc64-linux-gnu-gcc \ - PATH=$PATH:/rust/bin diff -Nru cargo-0.17.0/vendor/libc-0.2.18/ci/docker/powerpc-unknown-linux-gnu/Dockerfile cargo-0.19.0/vendor/libc-0.2.18/ci/docker/powerpc-unknown-linux-gnu/Dockerfile --- cargo-0.17.0/vendor/libc-0.2.18/ci/docker/powerpc-unknown-linux-gnu/Dockerfile 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/libc-0.2.18/ci/docker/powerpc-unknown-linux-gnu/Dockerfile 1970-01-01 00:00:00.000000000 +0000 @@ -1,10 +0,0 @@ -FROM ubuntu:16.10 - -RUN apt-get update -RUN apt-get install -y --no-install-recommends \ - gcc libc6-dev qemu-user ca-certificates \ - gcc-powerpc-linux-gnu libc6-dev-powerpc-cross \ - qemu-system-ppc - -ENV CARGO_TARGET_POWERPC_UNKNOWN_LINUX_GNU_LINKER=powerpc-linux-gnu-gcc \ - PATH=$PATH:/rust/bin diff -Nru cargo-0.17.0/vendor/libc-0.2.18/ci/docker/x86_64-rumprun-netbsd/Dockerfile cargo-0.19.0/vendor/libc-0.2.18/ci/docker/x86_64-rumprun-netbsd/Dockerfile --- cargo-0.17.0/vendor/libc-0.2.18/ci/docker/x86_64-rumprun-netbsd/Dockerfile 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/libc-0.2.18/ci/docker/x86_64-rumprun-netbsd/Dockerfile 1970-01-01 00:00:00.000000000 +0000 @@ -1,6 +0,0 @@ -FROM mato/rumprun-toolchain-hw-x86_64 -USER root -RUN apt-get update -RUN apt-get install -y 
--no-install-recommends \ - qemu -ENV PATH=$PATH:/rust/bin diff -Nru cargo-0.17.0/vendor/libc-0.2.18/ci/docker/x86_64-unknown-freebsd/Dockerfile cargo-0.19.0/vendor/libc-0.2.18/ci/docker/x86_64-unknown-freebsd/Dockerfile --- cargo-0.17.0/vendor/libc-0.2.18/ci/docker/x86_64-unknown-freebsd/Dockerfile 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/libc-0.2.18/ci/docker/x86_64-unknown-freebsd/Dockerfile 1970-01-01 00:00:00.000000000 +0000 @@ -1,13 +0,0 @@ -FROM alexcrichton/rust-slave-linux-cross:2016-04-15 -USER root - -RUN apt-get update -RUN apt-get install -y --no-install-recommends \ - qemu genext2fs - -ENTRYPOINT ["sh"] - -ENV PATH=$PATH:/rust/bin \ - QEMU=2016-11-06/freebsd.qcow2.gz \ - CAN_CROSS=1 \ - CARGO_TARGET_X86_64_UNKNOWN_FREEBSD_LINKER=x86_64-unknown-freebsd10-gcc diff -Nru cargo-0.17.0/vendor/libc-0.2.18/ci/docker/x86_64-unknown-linux-gnu/Dockerfile cargo-0.19.0/vendor/libc-0.2.18/ci/docker/x86_64-unknown-linux-gnu/Dockerfile --- cargo-0.17.0/vendor/libc-0.2.18/ci/docker/x86_64-unknown-linux-gnu/Dockerfile 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/libc-0.2.18/ci/docker/x86_64-unknown-linux-gnu/Dockerfile 1970-01-01 00:00:00.000000000 +0000 @@ -1,5 +0,0 @@ -FROM ubuntu:16.10 -RUN apt-get update -RUN apt-get install -y --no-install-recommends \ - gcc libc6-dev ca-certificates -ENV PATH=$PATH:/rust/bin diff -Nru cargo-0.17.0/vendor/libc-0.2.18/ci/docker/x86_64-unknown-linux-musl/Dockerfile cargo-0.19.0/vendor/libc-0.2.18/ci/docker/x86_64-unknown-linux-musl/Dockerfile --- cargo-0.17.0/vendor/libc-0.2.18/ci/docker/x86_64-unknown-linux-musl/Dockerfile 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/libc-0.2.18/ci/docker/x86_64-unknown-linux-musl/Dockerfile 1970-01-01 00:00:00.000000000 +0000 @@ -1,13 +0,0 @@ -FROM ubuntu:16.10 - -RUN apt-get update -RUN apt-get install -y --no-install-recommends \ - gcc make libc6-dev git curl ca-certificates -RUN curl https://www.musl-libc.org/releases/musl-1.1.15.tar.gz | \ - tar 
xzf - && \ - cd musl-1.1.15 && \ - ./configure --prefix=/musl-x86_64 && \ - make install -j4 && \ - cd .. && \ - rm -rf musl-1.1.15 -ENV PATH=$PATH:/musl-x86_64/bin:/rust/bin diff -Nru cargo-0.17.0/vendor/libc-0.2.18/ci/docker/x86_64-unknown-openbsd/Dockerfile cargo-0.19.0/vendor/libc-0.2.18/ci/docker/x86_64-unknown-openbsd/Dockerfile --- cargo-0.17.0/vendor/libc-0.2.18/ci/docker/x86_64-unknown-openbsd/Dockerfile 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/libc-0.2.18/ci/docker/x86_64-unknown-openbsd/Dockerfile 1970-01-01 00:00:00.000000000 +0000 @@ -1,8 +0,0 @@ -FROM ubuntu:16.10 - -RUN apt-get update -RUN apt-get install -y --no-install-recommends \ - gcc libc6-dev qemu curl ca-certificates \ - genext2fs -ENV PATH=$PATH:/rust/bin \ - QEMU=2016-11-06/openbsd-6.0-without-pkgs.qcow2 diff -Nru cargo-0.17.0/vendor/libc-0.2.18/ci/dox.sh cargo-0.19.0/vendor/libc-0.2.18/ci/dox.sh --- cargo-0.17.0/vendor/libc-0.2.18/ci/dox.sh 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/libc-0.2.18/ci/dox.sh 1970-01-01 00:00:00.000000000 +0000 @@ -1,33 +0,0 @@ -#!/bin/sh - -# Builds documentation for all target triples that we have a registered URL for -# in liblibc. This scrapes the list of triples to document from `src/lib.rs` -# which has a bunch of `html_root_url` directives we pick up. - -set -e - -TARGETS=`grep html_root_url src/lib.rs | sed 's/.*".*\/\(.*\)"/\1/'` - -rm -rf target/doc -mkdir -p target/doc - -cp ci/landing-page-head.html target/doc/index.html - -for target in $TARGETS; do - echo documenting $target - - rustdoc -o target/doc/$target --target $target src/lib.rs --cfg dox \ - --crate-name libc - - echo "
  • $target
  • " \ - >> target/doc/index.html -done - -cat ci/landing-page-footer.html >> target/doc/index.html - -# If we're on travis, not a PR, and on the right branch, publish! -if [ "$TRAVIS_PULL_REQUEST" = "false" ] && [ "$TRAVIS_BRANCH" = "master" ]; then - pip install ghp-import --user $USER - $HOME/.local/bin/ghp-import -n target/doc - git push -qf https://${GH_TOKEN}@github.com/${TRAVIS_REPO_SLUG}.git gh-pages -fi diff -Nru cargo-0.17.0/vendor/libc-0.2.18/ci/landing-page-footer.html cargo-0.19.0/vendor/libc-0.2.18/ci/landing-page-footer.html --- cargo-0.17.0/vendor/libc-0.2.18/ci/landing-page-footer.html 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/libc-0.2.18/ci/landing-page-footer.html 1970-01-01 00:00:00.000000000 +0000 @@ -1,3 +0,0 @@ - - - diff -Nru cargo-0.17.0/vendor/libc-0.2.18/ci/landing-page-head.html cargo-0.19.0/vendor/libc-0.2.18/ci/landing-page-head.html --- cargo-0.17.0/vendor/libc-0.2.18/ci/landing-page-head.html 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/libc-0.2.18/ci/landing-page-head.html 1970-01-01 00:00:00.000000000 +0000 @@ -1,7 +0,0 @@ - - - - - - -
      diff -Nru cargo-0.17.0/vendor/libc-0.2.18/ci/README.md cargo-0.19.0/vendor/libc-0.2.18/ci/README.md --- cargo-0.17.0/vendor/libc-0.2.18/ci/README.md 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/libc-0.2.18/ci/README.md 1970-01-01 00:00:00.000000000 +0000 @@ -1,203 +0,0 @@ -The goal of the libc crate is to have CI running everywhere to have the -strongest guarantees about the definitions that this library contains, and as a -result the CI is pretty complicated and also pretty large! Hopefully this can -serve as a guide through the sea of scripts in this directory and elsewhere in -this project. - -# Files - -First up, let's talk about the files in this directory: - -* `run-travis.sh` - a shell script run by all Travis builders, this is - responsible for setting up the rest of the environment such as installing new - packages, downloading Rust target libraries, etc. - -* `run.sh` - the actual script which runs tests for a particular architecture. - Called from the `run-travis.sh` script this will run all tests for the target - specified. - -* `cargo-config` - Cargo configuration of linkers to use copied into place by - the `run-travis.sh` script before builds are run. - -* `dox.sh` - script called from `run-travis.sh` on only the linux 64-bit nightly - Travis bots to build documentation for this crate. - -* `landing-page-*.html` - used by `dox.sh` to generate a landing page for all - architectures' documentation. - -* `run-qemu.sh` - see discussion about QEMU below - -* `mips`, `rumprun` - instructions to build the docker image for each respective - CI target - -# CI Systems - -Currently this repository leverages a combination of Travis CI and AppVeyor for -running tests. 
The triples tested are: - -* AppVeyor - * `{i686,x86_64}-pc-windows-{msvc,gnu}` -* Travis - * `{i686,x86_64,mips,aarch64}-unknown-linux-gnu` - * `x86_64-unknown-linux-musl` - * `arm-unknown-linux-gnueabihf` - * `arm-linux-androideabi` - * `{i686,x86_64}-apple-{darwin,ios}` - * `x86_64-rumprun-netbsd` - * `x86_64-unknown-freebsd` - * `x86_64-unknown-openbsd` - -The Windows triples are all pretty standard, they just set up their environment -then run tests, no need for downloading any extra target libs (we just download -the right installer). The Intel Linux/OSX builds are similar in that we just -download the right target libs and run tests. Note that the Intel Linux/OSX -builds are run on stable/beta/nightly, but are the only ones that do so. - -The remaining architectures look like: - -* Android runs in a [docker image][android-docker] with an emulator, the NDK, - and the SDK already set up. The entire build happens within the docker image. -* The MIPS, ARM, and AArch64 builds all use the QEMU userspace emulator to run - the generated binary to actually verify the tests pass. -* The MUSL build just has to download a MUSL compiler and target libraries and - then otherwise runs tests normally. -* iOS builds need an extra linker flag currently, but beyond that they're built - as standard as everything else. -* The rumprun target builds an entire kernel from the test suite and then runs - it inside QEMU using the serial console to test whether it succeeded or - failed. -* The BSD builds, currently OpenBSD and FreeBSD, use QEMU to boot up a system - and compile/run tests. More information on that below. - -[android-docker]: https://github.com/rust-lang/rust-buildbot/blob/master/slaves/android/Dockerfile - -## QEMU - -Lots of the architectures tested here use QEMU in the tests, so it's worth going -over all the crazy capabilities QEMU has and the various flavors in which we use -it! 
- -First up, QEMU has userspace emulation where it doesn't boot a full kernel, it -just runs a binary from another architecture (using the `qemu-` wrappers). -We provide it the runtime path for the dynamically loaded system libraries, -however. This strategy is used for all Linux architectures that aren't intel. -Note that one downside of this QEMU system is that threads are barely -implemented, so we're careful to not spawn many threads. - -For the rumprun target the only output is a kernel image, so we just use that -plus the `rumpbake` command to create a full kernel image which is then run from -within QEMU. - -Finally, the fun part, the BSDs. Quite a few hoops are jumped through to get CI -working for these platforms, but the gist of it looks like: - -* Cross compiling from Linux to any of the BSDs seems to be quite non-standard. - We may be able to get it working but it might be difficult at that point to - ensure that the libc definitions align with what you'd get on the BSD itself. - As a result, we try to do compiles within the BSD distro. -* On Travis we can't run a VM-in-a-VM, so we resort to userspace emulation - (QEMU). -* Unfortunately on Travis we also can't use KVM, so the emulation is super slow. - -With all that in mind, the way BSD is tested looks like: - -1. Download a pre-prepared image for the OS being tested. -2. Generate the tests for the OS being tested. This involves running the `ctest` - library over libc to generate a Rust file and a C file which will then be - compiled into the final test. -3. Generate a disk image which will later be mounted by the OS being tested. - This image is mostly just the libc directory, but some modifications are made - to compile the generated files from step 2. -4. The kernel is booted in QEMU, and it is configured to detect the libc-test - image being available, run the test script, and then shut down afterwards. -5. Look for whether the tests passed in the serial console output of the kernel. 
- -There's some pretty specific instructions for setting up each image (detailed -below), but the main gist of this is that we must avoid a vanilla `cargo run` -inside of the `libc-test` directory (which is what it's intended for) because -that would compile `syntex_syntax`, a large library, with userspace emulation. -This invariably times out on Travis, so we can't do that. - -Once all those hoops are jumped through, however, we can be happy that we're -testing almost everything! - -Below are some details of how to set up the initial OS images which are -downloaded. Each image must be enabled have input/output over the serial -console, log in automatically at the serial console, detect if a second drive in -QEMU is available, and if so mount it, run a script (it'll specifically be -`run-qemu.sh` in this folder which is copied into the generated image talked -about above), and then shut down. - -### QEMU setup - FreeBSD - -1. Download CD installer (most minimal is fine) -2. `qemu-img create -f qcow2 foo.qcow2 2G` -3. `qemu -cdrom foo.iso -drive if=virtio,file=foo.qcow2 -net nic,model=virtio -net user` -4. run installer -5. `echo 'console="comconsole"' >> /boot/loader.conf` -6. `echo 'autoboot_delay="0"' >> /boot/loader.conf` -7. look at /etc/ttys, see what getty argument is for ttyu0 -8. edit /etc/gettytab, look for ttyu0 argument, prepend `:al=root` to line - beneath - -(note that the current image has a `freebsd` user, but this isn't really -necessary) - -Once that's done, arrange for this script to run at login: - -``` -#!/bin/sh - -sudo kldload ext2fs -[ -e /dev/vtbd1 ] || exit 0 -sudo mount -t ext2fs /dev/vtbd1 /mnt -sh /mnt/run.sh /mnt -sudo poweroff -``` - -Helpful links - -* https://en.wikibooks.org/wiki/QEMU/Images -* https://blog.nekoconeko.nl/blog/2015/06/04/creating-an-openstack-freebsd-image.html -* https://www.freebsd.org/doc/handbook/serialconsole-setup.html - - -### QEMU setup - OpenBSD - -1. Download CD installer -2. 
`qemu-img create -f qcow2 foo.qcow2 2G` -3. `qemu -cdrom foo.iso -drive if=virtio,file=foo.qcow2 -net nic,model=virtio -net user` -4. run installer -5. `echo 'set tty com0' >> /etc/boot.conf` -6. `echo 'boot' >> /etc/boot.conf` -7. Modify /etc/ttys, change the `tty00` at the end from 'unknown off' to - 'vt220 on secure' -8. Modify same line in /etc/ttys to have `"/root/foo.sh"` as the shell -9. Add this script to `/root/foo.sh` - -``` -#!/bin/sh -exec 1>/dev/tty00 -exec 2>&1 - -if mount -t ext2fs /dev/sd1c /mnt; then - sh /mnt/run.sh /mnt - shutdown -ph now -fi - -# limited shell... -exec /bin/sh < /dev/tty00 -``` - -10. `chmod +x /root/foo.sh` - -Helpful links: - -* https://en.wikibooks.org/wiki/QEMU/Images -* http://www.openbsd.org/faq/faq7.html#SerCon - -# Questions? - -Hopefully that's at least somewhat of an introduction to everything going on -here, and feel free to ping @alexcrichton with questions! - diff -Nru cargo-0.17.0/vendor/libc-0.2.18/ci/run-docker.sh cargo-0.19.0/vendor/libc-0.2.18/ci/run-docker.sh --- cargo-0.17.0/vendor/libc-0.2.18/ci/run-docker.sh 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/libc-0.2.18/ci/run-docker.sh 1970-01-01 00:00:00.000000000 +0000 @@ -1,33 +0,0 @@ -# Small script to run tests for a target (or all targets) inside all the -# respective docker images. 
- -set -ex - -run() { - echo $1 - docker build -t libc ci/docker/$1 - mkdir -p target - docker run \ - --user `id -u`:`id -g` \ - --rm \ - --volume $HOME/.cargo:/cargo \ - --env CARGO_HOME=/cargo \ - --volume `rustc --print sysroot`:/rust:ro \ - --volume `pwd`:/checkout:ro \ - --volume `pwd`/target:/checkout/target \ - --env CARGO_TARGET_DIR=/checkout/target \ - --workdir /checkout \ - --privileged \ - --interactive \ - --tty \ - libc \ - ci/run.sh $1 -} - -if [ -z "$1" ]; then - for d in `ls ci/docker/`; do - run $d - done -else - run $1 -fi diff -Nru cargo-0.17.0/vendor/libc-0.2.18/ci/run-qemu.sh cargo-0.19.0/vendor/libc-0.2.18/ci/run-qemu.sh --- cargo-0.17.0/vendor/libc-0.2.18/ci/run-qemu.sh 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/libc-0.2.18/ci/run-qemu.sh 1970-01-01 00:00:00.000000000 +0000 @@ -1,32 +0,0 @@ -# Initial script which is run inside of all qemu images. The first argument to -# this script (as arranged by the qemu image itself) is the path to where the -# libc crate is mounted. -# -# For qemu images we currently need to install Rust manually as this wasn't done -# by the initial run-travis.sh script -# -# FIXME: feels like run-travis.sh should be responsible for downloading the -# compiler. - -set -ex - -ROOT=$1 -cp -r $ROOT/libc /tmp/libc -cd /tmp/libc - -TARGET=$(cat $ROOT/TARGET) -export CARGO_TARGET_DIR=/tmp - -case $TARGET in - *-openbsd) - pkg_add cargo gcc%4.9 rust - export CC=egcc - ;; - - *) - echo "Unknown target: $TARGET" - exit 1 - ;; -esac - -exec sh ci/run.sh $TARGET diff -Nru cargo-0.17.0/vendor/libc-0.2.18/ci/run.sh cargo-0.19.0/vendor/libc-0.2.18/ci/run.sh --- cargo-0.17.0/vendor/libc-0.2.18/ci/run.sh 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/libc-0.2.18/ci/run.sh 1970-01-01 00:00:00.000000000 +0000 @@ -1,160 +0,0 @@ -#!/bin/sh - -# Builds and runs tests for a particular target passed as an argument to this -# script. 
- -set -ex - -TARGET=$1 - -# If we're going to run tests inside of a qemu image, then we don't need any of -# the scripts below. Instead, download the image, prepare a filesystem which has -# the current state of this repository, and then run the image. -# -# It's assume that all images, when run with two disks, will run the `run.sh` -# script from the second which we place inside. -if [ "$QEMU" != "" ]; then - tmpdir=/tmp/qemu-img-creation - mkdir -p $tmpdir - - if [ -z "${QEMU#*.gz}" ]; then - # image is .gz : download and uncompress it - qemufile=$(echo ${QEMU%.gz} | sed 's/\//__/g') - if [ ! -f $tmpdir/$qemufile ]; then - curl https://s3.amazonaws.com/rust-lang-ci/libc/$QEMU | \ - gunzip -d > $tmpdir/$qemufile - fi - else - # plain qcow2 image: just download it - qemufile=$(echo ${QEMU} | sed 's/\//__/g') - if [ ! -f $tmpdir/$qemufile ]; then - curl https://s3.amazonaws.com/rust-lang-ci/libc/$QEMU \ - > $tmpdir/$qemufile - fi - fi - - # Create a mount a fresh new filesystem image that we'll later pass to QEMU. - # This will have a `run.sh` script will which use the artifacts inside to run - # on the host. - rm -f $tmpdir/libc-test.img - mkdir $tmpdir/mount - - # If we have a cross compiler, then we just do the standard rigamarole of - # cross-compiling an executable and then the script to run just executes the - # binary. - # - # If we don't have a cross-compiler, however, then we need to do some crazy - # acrobatics to get this to work. Generate all.{c,rs} on the host which will - # be compiled inside QEMU. Do this here because compiling syntex_syntax in - # QEMU would time out basically everywhere. 
- if [ "$CAN_CROSS" = "1" ]; then - cargo build --manifest-path libc-test/Cargo.toml --target $TARGET - cp $CARGO_TARGET_DIR/$TARGET/debug/libc-test $tmpdir/mount/ - echo 'exec $1/libc-test' > $tmpdir/mount/run.sh - else - rm -rf $tmpdir/generated - mkdir -p $tmpdir/generated - cargo build --manifest-path libc-test/generate-files/Cargo.toml - (cd libc-test && TARGET=$TARGET OUT_DIR=$tmpdir/generated SKIP_COMPILE=1 \ - $CARGO_TARGET_DIR/debug/generate-files) - - # Copy this folder into the mounted image, the `run.sh` entry point, and - # overwrite the standard libc-test Cargo.toml with the overlay one which will - # assume the all.{c,rs} test files have already been generated - mkdir $tmpdir/mount/libc - cp -r Cargo.* libc-test src ci $tmpdir/mount/libc/ - ln -s libc-test/target $tmpdir/mount/libc/target - cp ci/run-qemu.sh $tmpdir/mount/run.sh - echo $TARGET | tee -a $tmpdir/mount/TARGET - cp $tmpdir/generated/* $tmpdir/mount/libc/libc-test - cp libc-test/run-generated-Cargo.toml $tmpdir/mount/libc/libc-test/Cargo.toml - fi - - du -sh $tmpdir/mount - genext2fs \ - --root $tmpdir/mount \ - --size-in-blocks 100000 \ - $tmpdir/libc-test.img - - # Pass -snapshot to prevent tampering with the disk images, this helps when - # running this script in development. The two drives are then passed next, - # first is the OS and second is the one we just made. Next the network is - # configured to work (I'm not entirely sure how), and then finally we turn off - # graphics and redirect the serial console output to out.log. 
- qemu-system-x86_64 \ - -m 1024 \ - -snapshot \ - -drive if=virtio,file=$tmpdir/$qemufile \ - -drive if=virtio,file=$tmpdir/libc-test.img \ - -net nic,model=virtio \ - -net user \ - -nographic \ - -vga none 2>&1 | tee $CARGO_TARGET_DIR/out.log - exec grep "^PASSED .* tests" $CARGO_TARGET_DIR/out.log -fi - -case "$TARGET" in - *-apple-ios) - cargo rustc --manifest-path libc-test/Cargo.toml --target $TARGET -- \ - -C link-args=-mios-simulator-version-min=7.0 - ;; - - *) - cargo build --manifest-path libc-test/Cargo.toml --target $TARGET - ;; -esac - -case "$TARGET" in - arm-linux-androideabi) - emulator @arm-21 -no-window & - adb wait-for-device - adb push $CARGO_TARGET_DIR/$TARGET/debug/libc-test /data/libc-test - adb shell /data/libc-test 2>&1 | tee /tmp/out - grep "^PASSED .* tests" /tmp/out - ;; - - arm-unknown-linux-gnueabihf) - qemu-arm -L /usr/arm-linux-gnueabihf $CARGO_TARGET_DIR/$TARGET/debug/libc-test - ;; - - mips-unknown-linux-gnu) - qemu-mips -L /usr/mips-linux-gnu $CARGO_TARGET_DIR/$TARGET/debug/libc-test - ;; - - mips64-unknown-linux-gnuabi64) - qemu-mips64 -L /usr/mips64-linux-gnuabi64 $CARGO_TARGET_DIR/$TARGET/debug/libc-test - ;; - - mips-unknown-linux-musl) - qemu-mips -L /toolchain/staging_dir/toolchain-mips_34kc_gcc-5.3.0_musl-1.1.15 \ - $CARGO_TARGET_DIR/$TARGET/debug/libc-test - ;; - - mipsel-unknown-linux-musl) - qemu-mipsel -L /toolchain $CARGO_TARGET_DIR/$TARGET/debug/libc-test - ;; - - powerpc-unknown-linux-gnu) - qemu-ppc -L /usr/powerpc-linux-gnu $CARGO_TARGET_DIR/$TARGET/debug/libc-test - ;; - - powerpc64-unknown-linux-gnu) - qemu-ppc64 -L /usr/powerpc64-linux-gnu $CARGO_TARGET_DIR/$TARGET/debug/libc-test - ;; - - aarch64-unknown-linux-gnu) - qemu-aarch64 -L /usr/aarch64-linux-gnu/ $CARGO_TARGET_DIR/$TARGET/debug/libc-test - ;; - - *-rumprun-netbsd) - rumprun-bake hw_virtio /tmp/libc-test.img $CARGO_TARGET_DIR/$TARGET/debug/libc-test - qemu-system-x86_64 -nographic -vga none -m 64 \ - -kernel /tmp/libc-test.img 2>&1 | tee /tmp/out & - 
sleep 5 - grep "^PASSED .* tests" /tmp/out - ;; - - *) - $CARGO_TARGET_DIR/$TARGET/debug/libc-test - ;; -esac diff -Nru cargo-0.17.0/vendor/libc-0.2.18/ci/style.rs cargo-0.19.0/vendor/libc-0.2.18/ci/style.rs --- cargo-0.17.0/vendor/libc-0.2.18/ci/style.rs 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/libc-0.2.18/ci/style.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,204 +0,0 @@ -//! Simple script to verify the coding style of this library -//! -//! ## How to run -//! -//! The first argument to this script is the directory to run on, so running -//! this script should be as simple as: -//! -//! ```notrust -//! rustc ci/style.rs -//! ./style src -//! ``` -//! -//! ## Guidelines -//! -//! The current style is: -//! -//! * No trailing whitespace -//! * No tabs -//! * 80-character lines -//! * `extern` instead of `extern "C"` -//! * Specific module layout: -//! 1. use directives -//! 2. typedefs -//! 3. structs -//! 4. constants -//! 5. f! { ... } functions -//! 6. extern functions -//! 7. modules + pub use -//! -//! Things not verified: -//! -//! * alignment -//! * 4-space tabs -//! * leading colons on paths - -use std::env; -use std::fs; -use std::io::prelude::*; -use std::path::Path; - -macro_rules! t { - ($e:expr) => (match $e { - Ok(e) => e, - Err(e) => panic!("{} failed with {}", stringify!($e), e), - }) -} - -fn main() { - let arg = env::args().skip(1).next().unwrap_or(".".to_string()); - - let mut errors = Errors { errs: false }; - walk(Path::new(&arg), &mut errors); - - if errors.errs { - panic!("found some lint errors"); - } else { - println!("good style!"); - } -} - -fn walk(path: &Path, err: &mut Errors) { - for entry in t!(path.read_dir()).map(|e| t!(e)) { - let path = entry.path(); - if t!(entry.file_type()).is_dir() { - walk(&path, err); - continue - } - - let name = entry.file_name().into_string().unwrap(); - match &name[..] 
{ - n if !n.ends_with(".rs") => continue, - - "dox.rs" | - "lib.rs" | - "macros.rs" => continue, - - _ => {} - } - - let mut contents = String::new(); - t!(t!(fs::File::open(&path)).read_to_string(&mut contents)); - - check_style(&contents, &path, err); - } -} - -struct Errors { - errs: bool, -} - -#[derive(Clone, Copy, PartialEq)] -enum State { - Start, - Imports, - Typedefs, - Structs, - Constants, - FunctionDefinitions, - Functions, - Modules, -} - -fn check_style(file: &str, path: &Path, err: &mut Errors) { - let mut state = State::Start; - let mut s_macros = 0; - let mut f_macros = 0; - let mut prev_blank = false; - - for (i, line) in file.lines().enumerate() { - if line == "" { - if prev_blank { - err.error(path, i, "double blank line"); - } - prev_blank = true; - } else { - prev_blank = false; - } - if line != line.trim_right() { - err.error(path, i, "trailing whitespace"); - } - if line.contains("\t") { - err.error(path, i, "tab character"); - } - if line.len() > 80 { - err.error(path, i, "line longer than 80 chars"); - } - if line.contains("extern \"C\"") { - err.error(path, i, "use `extern` instead of `extern \"C\""); - } - if line.contains("#[cfg(") && !line.contains(" if ") { - if state != State::Structs { - err.error(path, i, "use cfg_if! and submodules \ - instead of #[cfg]"); - } - } - - let line = line.trim_left(); - let is_pub = line.starts_with("pub "); - let line = if is_pub {&line[4..]} else {line}; - - let line_state = if line.starts_with("use ") { - if is_pub { - State::Modules - } else { - State::Imports - } - } else if line.starts_with("const ") { - State::Constants - } else if line.starts_with("type ") { - State::Typedefs - } else if line.starts_with("s! {") { - s_macros += 1; - State::Structs - } else if line.starts_with("f! 
{") { - f_macros += 1; - State::FunctionDefinitions - } else if line.starts_with("extern ") { - State::Functions - } else if line.starts_with("mod ") { - State::Modules - } else { - continue - }; - - if state as usize > line_state as usize { - err.error(path, i, &format!("{} found after {} when \ - it belongs before", - line_state.desc(), state.desc())); - } - - if f_macros == 2 { - f_macros += 1; - err.error(path, i, "multiple f! macros in one module"); - } - if s_macros == 2 { - s_macros += 1; - err.error(path, i, "multiple s! macros in one module"); - } - - state = line_state; - } -} - -impl State { - fn desc(&self) -> &str { - match *self { - State::Start => "start", - State::Imports => "import", - State::Typedefs => "typedef", - State::Structs => "struct", - State::Constants => "constant", - State::FunctionDefinitions => "function definition", - State::Functions => "extern function", - State::Modules => "module", - } - } -} - -impl Errors { - fn error(&mut self, path: &Path, line: usize, msg: &str) { - self.errs = true; - println!("{}:{} - {}", path.display(), line + 1, msg); - } -} diff -Nru cargo-0.17.0/vendor/libc-0.2.18/.gitignore cargo-0.19.0/vendor/libc-0.2.18/.gitignore --- cargo-0.17.0/vendor/libc-0.2.18/.gitignore 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/libc-0.2.18/.gitignore 1970-01-01 00:00:00.000000000 +0000 @@ -1,3 +0,0 @@ -target -Cargo.lock -*~ diff -Nru cargo-0.17.0/vendor/libc-0.2.18/LICENSE-APACHE cargo-0.19.0/vendor/libc-0.2.18/LICENSE-APACHE --- cargo-0.17.0/vendor/libc-0.2.18/LICENSE-APACHE 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/libc-0.2.18/LICENSE-APACHE 1970-01-01 00:00:00.000000000 +0000 @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - -1. Definitions. 
- - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. 
- - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - -2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - -3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - -4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the 
following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - -5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - -6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - -7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - -8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - -9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- -END OF TERMS AND CONDITIONS - -APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - -Copyright [yyyy] [name of copyright owner] - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
diff -Nru cargo-0.17.0/vendor/libc-0.2.18/LICENSE-MIT cargo-0.19.0/vendor/libc-0.2.18/LICENSE-MIT --- cargo-0.17.0/vendor/libc-0.2.18/LICENSE-MIT 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/libc-0.2.18/LICENSE-MIT 1970-01-01 00:00:00.000000000 +0000 @@ -1,25 +0,0 @@ -Copyright (c) 2014 The Rust Project Developers - -Permission is hereby granted, free of charge, to any -person obtaining a copy of this software and associated -documentation files (the "Software"), to deal in the -Software without restriction, including without -limitation the rights to use, copy, modify, merge, -publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software -is furnished to do so, subject to the following -conditions: - -The above copyright notice and this permission notice -shall be included in all copies or substantial portions -of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -DEALINGS IN THE SOFTWARE. diff -Nru cargo-0.17.0/vendor/libc-0.2.18/README.md cargo-0.19.0/vendor/libc-0.2.18/README.md --- cargo-0.17.0/vendor/libc-0.2.18/README.md 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/libc-0.2.18/README.md 1970-01-01 00:00:00.000000000 +0000 @@ -1,137 +0,0 @@ -libc -==== - -A Rust library with native bindings to the types and functions commonly found on -various systems, including libc. 
- -[![Build Status](https://travis-ci.org/rust-lang/libc.svg?branch=master)](https://travis-ci.org/rust-lang/libc) -[![Build status](https://ci.appveyor.com/api/projects/status/github/rust-lang/libc?svg=true)](https://ci.appveyor.com/project/rust-lang-libs/libc) - -[Documentation](#platforms-and-documentation) - -## Usage - -First, add the following to your `Cargo.toml`: - -```toml -[dependencies] -libc = "0.2" -``` - -Next, add this to your crate root: - -```rust -extern crate libc; -``` - -Currently libc by default links to the standard library, but if you would -instead like to use libc in a `#![no_std]` situation or crate you can request -this via: - -```toml -[dependencies] -libc = { version = "0.2", default-features = false } -``` - -## What is libc? - -The primary purpose of this crate is to provide all of the definitions necessary -to easily interoperate with C code (or "C-like" code) on each of the platforms -that Rust supports. This includes type definitions (e.g. `c_int`), constants -(e.g. `EINVAL`) as well as function headers (e.g. `malloc`). - -This crate does not strive to have any form of compatibility across platforms, -but rather it is simply a straight binding to the system libraries on the -platform in question. - -## Public API - -This crate exports all underlying platform types, functions, and constants under -the crate root, so all items are accessible as `libc::foo`. The types and values -of all the exported APIs match the platform that libc is compiled for. - -More detailed information about the design of this library can be found in its -[associated RFC][rfc]. - -[rfc]: https://github.com/rust-lang/rfcs/blob/master/text/1291-promote-libc.md - -## Adding an API - -Want to use an API which currently isn't bound in `libc`? It's quite easy to add -one! - -The internal structure of this crate is designed to minimize the number of -`#[cfg]` attributes in order to easily be able to add new items which apply -to all platforms in the future. 
As a result, the crate is organized -hierarchically based on platform. Each module has a number of `#[cfg]`'d -children, but only one is ever actually compiled. Each module then reexports all -the contents of its children. - -This means that for each platform that libc supports, the path from a -leaf module to the root will contain all bindings for the platform in question. -Consequently, this indicates where an API should be added! Adding an API at a -particular level in the hierarchy means that it is supported on all the child -platforms of that level. For example, when adding a Unix API it should be added -to `src/unix/mod.rs`, but when adding a Linux-only API it should be added to -`src/unix/notbsd/linux/mod.rs`. - -If you're not 100% sure at what level of the hierarchy an API should be added -at, fear not! This crate has CI support which tests any binding against all -platforms supported, so you'll see failures if an API is added at the wrong -level or has different signatures across platforms. - -With that in mind, the steps for adding a new API are: - -1. Determine where in the module hierarchy your API should be added. -2. Add the API. -3. Send a PR to this repo. -4. Wait for CI to pass, fixing errors. -5. Wait for a merge! - -### Test before you commit - -We have two automated tests running on [Travis](https://travis-ci.org/rust-lang/libc): - -1. [`libc-test`](https://github.com/alexcrichton/ctest) - - `cd libc-test && cargo run` - - Use the `skip_*()` functions in `build.rs` if you really need a workaround. -2. 
Style checker - - `rustc ci/style.rs && ./style src` - -## Platforms and Documentation - -The following platforms are currently tested and have documentation available: - -Tested: - * [`i686-pc-windows-msvc`](https://doc.rust-lang.org/libc/i686-pc-windows-msvc/libc/) - * [`x86_64-pc-windows-msvc`](https://doc.rust-lang.org/libc/x86_64-pc-windows-msvc/libc/) - (Windows) - * [`i686-pc-windows-gnu`](https://doc.rust-lang.org/libc/i686-pc-windows-gnu/libc/) - * [`x86_64-pc-windows-gnu`](https://doc.rust-lang.org/libc/x86_64-pc-windows-gnu/libc/) - * [`i686-apple-darwin`](https://doc.rust-lang.org/libc/i686-apple-darwin/libc/) - * [`x86_64-apple-darwin`](https://doc.rust-lang.org/libc/x86_64-apple-darwin/libc/) - (OSX) - * `i686-apple-ios` - * `x86_64-apple-ios` - * [`i686-unknown-linux-gnu`](https://doc.rust-lang.org/libc/i686-unknown-linux-gnu/libc/) - * [`x86_64-unknown-linux-gnu`](https://doc.rust-lang.org/libc/x86_64-unknown-linux-gnu/libc/) - (Linux) - * [`x86_64-unknown-linux-musl`](https://doc.rust-lang.org/libc/x86_64-unknown-linux-musl/libc/) - (Linux MUSL) - * [`aarch64-unknown-linux-gnu`](https://doc.rust-lang.org/libc/aarch64-unknown-linux-gnu/libc/) - * [`mips-unknown-linux-gnu`](https://doc.rust-lang.org/libc/mips-unknown-linux-gnu/libc/) - * [`arm-unknown-linux-gnueabihf`](https://doc.rust-lang.org/libc/arm-unknown-linux-gnueabihf/libc/) - * [`arm-linux-androideabi`](https://doc.rust-lang.org/libc/arm-linux-androideabi/libc/) - (Android) - * [`x86_64-unknown-freebsd`](https://doc.rust-lang.org/libc/x86_64-unknown-freebsd/libc/) - * [`x86_64-unknown-openbsd`](https://doc.rust-lang.org/libc/x86_64-unknown-openbsd/libc/) - * [`x86_64-rumprun-netbsd`](https://doc.rust-lang.org/libc/x86_64-unknown-netbsd/libc/) - -The following may be supported, but are not guaranteed to always work: - - * `i686-unknown-freebsd` - * [`x86_64-unknown-bitrig`](https://doc.rust-lang.org/libc/x86_64-unknown-bitrig/libc/) - * 
[`x86_64-unknown-dragonfly`](https://doc.rust-lang.org/libc/x86_64-unknown-dragonfly/libc/) - * `i686-unknown-haiku` - * `x86_64-unknown-haiku` - * [`x86_64-unknown-netbsd`](https://doc.rust-lang.org/libc/x86_64-unknown-netbsd/libc/) diff -Nru cargo-0.17.0/vendor/libc-0.2.18/src/dox.rs cargo-0.19.0/vendor/libc-0.2.18/src/dox.rs --- cargo-0.17.0/vendor/libc-0.2.18/src/dox.rs 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/libc-0.2.18/src/dox.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,134 +0,0 @@ -pub use self::imp::*; - -#[cfg(not(dox))] -mod imp { - pub use core::option::Option; - pub use core::clone::Clone; - pub use core::marker::Copy; - pub use core::mem; -} - -#[cfg(dox)] -mod imp { - pub enum Option { - Some(T), - None, - } - impl Copy for Option {} - impl Clone for Option { - fn clone(&self) -> Option { loop {} } - } - - pub trait Clone { - fn clone(&self) -> Self; - } - - #[lang = "copy"] - pub trait Copy {} - - #[lang = "sync"] - pub trait Sync {} - impl Sync for T {} - - #[lang = "sized"] - pub trait Sized {} - - macro_rules! each_int { - ($mac:ident) => ( - $mac!(u8); - $mac!(u16); - $mac!(u32); - $mac!(u64); - $mac!(usize); - $mac!(i8); - $mac!(i16); - $mac!(i32); - $mac!(i64); - $mac!(isize); - ) - } - - #[lang = "div"] - pub trait Div { - type Output; - fn div(self, rhs: RHS) -> Self::Output; - } - - macro_rules! impl_div { - ($($i:ident)*) => ($( - impl Div<$i> for $i { - type Output = $i; - fn div(self, rhs: $i) -> $i { self / rhs } - } - )*) - } - each_int!(impl_div); - - #[lang = "shl"] - pub trait Shl { - type Output; - fn shl(self, rhs: RHS) -> Self::Output; - } - - macro_rules! impl_shl { - ($($i:ident)*) => ($( - impl Shl<$i> for $i { - type Output = $i; - fn shl(self, rhs: $i) -> $i { self << rhs } - } - )*) - } - each_int!(impl_shl); - - #[lang = "mul"] - pub trait Mul { - type Output; - fn mul(self, rhs: RHS) -> Self::Output; - } - - macro_rules! 
impl_mul { - ($($i:ident)*) => ($( - impl Mul for $i { - type Output = $i; - fn mul(self, rhs: $i) -> $i { self * rhs } - } - )*) - } - each_int!(impl_mul); - - #[lang = "sub"] - pub trait Sub { - type Output; - fn sub(self, rhs: RHS) -> Self::Output; - } - - macro_rules! impl_sub { - ($($i:ident)*) => ($( - impl Sub for $i { - type Output = $i; - fn sub(self, rhs: $i) -> $i { self - rhs } - } - )*) - } - each_int!(impl_sub); - - #[lang = "bitor"] - pub trait Bitor { - type Output; - fn bitor(self, rhs: RHS) -> Self::Output; - } - - macro_rules! impl_bitor { - ($($i:ident)*) => ($( - impl Bitor for $i { - type Output = $i; - fn bitor(self, rhs: $i) -> $i { self | rhs } - } - )*) - } - each_int!(impl_bitor); - - pub mod mem { - pub fn size_of_val(_: &T) -> usize { 4 } - } -} diff -Nru cargo-0.17.0/vendor/libc-0.2.18/src/lib.rs cargo-0.19.0/vendor/libc-0.2.18/src/lib.rs --- cargo-0.17.0/vendor/libc-0.2.18/src/lib.rs 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/libc-0.2.18/src/lib.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,278 +0,0 @@ -// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -//! 
Crate docs - -#![allow(bad_style, overflowing_literals, improper_ctypes)] -#![crate_type = "rlib"] -#![crate_name = "libc"] -#![cfg_attr(dox, feature(no_core, lang_items))] -#![cfg_attr(dox, no_core)] -#![doc(html_logo_url = "https://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png", - html_favicon_url = "https://doc.rust-lang.org/favicon.ico")] - -#![cfg_attr(all(target_os = "linux", target_arch = "x86_64"), doc( - html_root_url = "https://doc.rust-lang.org/libc/x86_64-unknown-linux-gnu" -))] -#![cfg_attr(all(target_os = "linux", target_arch = "x86"), doc( - html_root_url = "https://doc.rust-lang.org/libc/i686-unknown-linux-gnu" -))] -#![cfg_attr(all(target_os = "linux", target_arch = "arm"), doc( - html_root_url = "https://doc.rust-lang.org/libc/arm-unknown-linux-gnueabihf" -))] -#![cfg_attr(all(target_os = "linux", target_arch = "mips"), doc( - html_root_url = "https://doc.rust-lang.org/libc/mips-unknown-linux-gnu" -))] -#![cfg_attr(all(target_os = "linux", target_arch = "aarch64"), doc( - html_root_url = "https://doc.rust-lang.org/libc/aarch64-unknown-linux-gnu" -))] -#![cfg_attr(all(target_os = "linux", target_env = "musl"), doc( - html_root_url = "https://doc.rust-lang.org/libc/x86_64-unknown-linux-musl" -))] -#![cfg_attr(all(target_os = "macos", target_arch = "x86_64"), doc( - html_root_url = "https://doc.rust-lang.org/libc/x86_64-apple-darwin" -))] -#![cfg_attr(all(target_os = "macos", target_arch = "x86"), doc( - html_root_url = "https://doc.rust-lang.org/libc/i686-apple-darwin" -))] -#![cfg_attr(all(windows, target_arch = "x86_64", target_env = "gnu"), doc( - html_root_url = "https://doc.rust-lang.org/libc/x86_64-pc-windows-gnu" -))] -#![cfg_attr(all(windows, target_arch = "x86", target_env = "gnu"), doc( - html_root_url = "https://doc.rust-lang.org/libc/i686-pc-windows-gnu" -))] -#![cfg_attr(all(windows, target_arch = "x86_64", target_env = "msvc"), doc( - html_root_url = "https://doc.rust-lang.org/libc/x86_64-pc-windows-msvc" -))] 
-#![cfg_attr(all(windows, target_arch = "x86", target_env = "msvc"), doc( - html_root_url = "https://doc.rust-lang.org/libc/i686-pc-windows-msvc" -))] -#![cfg_attr(target_os = "android", doc( - html_root_url = "https://doc.rust-lang.org/libc/arm-linux-androideabi" -))] -#![cfg_attr(target_os = "freebsd", doc( - html_root_url = "https://doc.rust-lang.org/libc/x86_64-unknown-freebsd" -))] -#![cfg_attr(target_os = "openbsd", doc( - html_root_url = "https://doc.rust-lang.org/libc/x86_64-unknown-openbsd" -))] -#![cfg_attr(target_os = "bitrig", doc( - html_root_url = "https://doc.rust-lang.org/libc/x86_64-unknown-bitrig" -))] -#![cfg_attr(target_os = "netbsd", doc( - html_root_url = "https://doc.rust-lang.org/libc/x86_64-unknown-netbsd" -))] -#![cfg_attr(target_os = "dragonfly", doc( - html_root_url = "https://doc.rust-lang.org/libc/x86_64-unknown-dragonfly" -))] - -// Attributes needed when building as part of the standard library -#![cfg_attr(stdbuild, feature(no_std, core, core_slice_ext, staged_api, custom_attribute, cfg_target_vendor))] -#![cfg_attr(stdbuild, feature(link_cfg))] -#![cfg_attr(stdbuild, no_std)] -#![cfg_attr(stdbuild, staged_api)] -#![cfg_attr(stdbuild, allow(warnings))] -#![cfg_attr(stdbuild, unstable(feature = "libc", - reason = "use `libc` from crates.io", - issue = "27783"))] - -#![cfg_attr(not(feature = "use_std"), no_std)] - -#[cfg(all(not(stdbuild), not(dox), feature = "use_std"))] -extern crate std as core; - -#[macro_use] mod macros; -mod dox; - -// Use repr(u8) as LLVM expects `void*` to be the same as `i8*` to help enable -// more optimization opportunities around it recognizing things like -// malloc/free. -#[repr(u8)] -pub enum c_void { - // Two dummy variants so the #[repr] attribute can be used. 
- #[doc(hidden)] - __variant1, - #[doc(hidden)] - __variant2, -} - -pub type int8_t = i8; -pub type int16_t = i16; -pub type int32_t = i32; -pub type int64_t = i64; -pub type uint8_t = u8; -pub type uint16_t = u16; -pub type uint32_t = u32; -pub type uint64_t = u64; - -pub type c_schar = i8; -pub type c_uchar = u8; -pub type c_short = i16; -pub type c_ushort = u16; -pub type c_int = i32; -pub type c_uint = u32; -pub type c_float = f32; -pub type c_double = f64; -pub type c_longlong = i64; -pub type c_ulonglong = u64; -pub type intmax_t = i64; -pub type uintmax_t = u64; - -pub type size_t = usize; -pub type ptrdiff_t = isize; -pub type intptr_t = isize; -pub type uintptr_t = usize; -pub type ssize_t = isize; - -pub enum FILE {} -pub enum fpos_t {} // TODO: fill this out with a struct - -extern { - pub fn isalnum(c: c_int) -> c_int; - pub fn isalpha(c: c_int) -> c_int; - pub fn iscntrl(c: c_int) -> c_int; - pub fn isdigit(c: c_int) -> c_int; - pub fn isgraph(c: c_int) -> c_int; - pub fn islower(c: c_int) -> c_int; - pub fn isprint(c: c_int) -> c_int; - pub fn ispunct(c: c_int) -> c_int; - pub fn isspace(c: c_int) -> c_int; - pub fn isupper(c: c_int) -> c_int; - pub fn isxdigit(c: c_int) -> c_int; - pub fn tolower(c: c_int) -> c_int; - pub fn toupper(c: c_int) -> c_int; - - #[cfg_attr(all(target_os = "macos", target_arch = "x86"), - link_name = "fopen$UNIX2003")] - pub fn fopen(filename: *const c_char, - mode: *const c_char) -> *mut FILE; - #[cfg_attr(all(target_os = "macos", target_arch = "x86"), - link_name = "freopen$UNIX2003")] - pub fn freopen(filename: *const c_char, mode: *const c_char, - file: *mut FILE) -> *mut FILE; - pub fn fflush(file: *mut FILE) -> c_int; - pub fn fclose(file: *mut FILE) -> c_int; - pub fn remove(filename: *const c_char) -> c_int; - pub fn rename(oldname: *const c_char, newname: *const c_char) -> c_int; - pub fn tmpfile() -> *mut FILE; - pub fn setvbuf(stream: *mut FILE, - buffer: *mut c_char, - mode: c_int, - size: size_t) -> c_int; - 
pub fn setbuf(stream: *mut FILE, buf: *mut c_char); - pub fn getchar() -> c_int; - pub fn putchar(c: c_int) -> c_int; - pub fn fgetc(stream: *mut FILE) -> c_int; - pub fn fgets(buf: *mut c_char, n: c_int, stream: *mut FILE) -> *mut c_char; - pub fn fputc(c: c_int, stream: *mut FILE) -> c_int; - #[cfg_attr(all(target_os = "macos", target_arch = "x86"), - link_name = "fputs$UNIX2003")] - pub fn fputs(s: *const c_char, stream: *mut FILE)-> c_int; - pub fn puts(s: *const c_char) -> c_int; - pub fn ungetc(c: c_int, stream: *mut FILE) -> c_int; - pub fn fread(ptr: *mut c_void, - size: size_t, - nobj: size_t, - stream: *mut FILE) - -> size_t; - #[cfg_attr(all(target_os = "macos", target_arch = "x86"), - link_name = "fwrite$UNIX2003")] - pub fn fwrite(ptr: *const c_void, - size: size_t, - nobj: size_t, - stream: *mut FILE) - -> size_t; - pub fn fseek(stream: *mut FILE, offset: c_long, whence: c_int) -> c_int; - pub fn ftell(stream: *mut FILE) -> c_long; - pub fn rewind(stream: *mut FILE); - #[cfg_attr(target_os = "netbsd", link_name = "__fgetpos50")] - pub fn fgetpos(stream: *mut FILE, ptr: *mut fpos_t) -> c_int; - #[cfg_attr(target_os = "netbsd", link_name = "__fsetpos50")] - pub fn fsetpos(stream: *mut FILE, ptr: *const fpos_t) -> c_int; - pub fn feof(stream: *mut FILE) -> c_int; - pub fn ferror(stream: *mut FILE) -> c_int; - pub fn perror(s: *const c_char); - pub fn atoi(s: *const c_char) -> c_int; - #[cfg_attr(all(target_os = "macos", target_arch = "x86"), - link_name = "strtod$UNIX2003")] - pub fn strtod(s: *const c_char, endp: *mut *mut c_char) -> c_double; - pub fn strtol(s: *const c_char, - endp: *mut *mut c_char, base: c_int) -> c_long; - pub fn strtoul(s: *const c_char, endp: *mut *mut c_char, - base: c_int) -> c_ulong; - pub fn calloc(nobj: size_t, size: size_t) -> *mut c_void; - pub fn malloc(size: size_t) -> *mut c_void; - pub fn realloc(p: *mut c_void, size: size_t) -> *mut c_void; - pub fn free(p: *mut c_void); - pub fn abort() -> !; - pub fn exit(status: 
c_int) -> !; - pub fn _exit(status: c_int) -> !; - pub fn atexit(cb: extern fn()) -> c_int; - #[cfg_attr(all(target_os = "macos", target_arch = "x86"), - link_name = "system$UNIX2003")] - pub fn system(s: *const c_char) -> c_int; - pub fn getenv(s: *const c_char) -> *mut c_char; - - pub fn strcpy(dst: *mut c_char, src: *const c_char) -> *mut c_char; - pub fn strncpy(dst: *mut c_char, src: *const c_char, n: size_t) - -> *mut c_char; - pub fn strcat(s: *mut c_char, ct: *const c_char) -> *mut c_char; - pub fn strncat(s: *mut c_char, ct: *const c_char, n: size_t) -> *mut c_char; - pub fn strcmp(cs: *const c_char, ct: *const c_char) -> c_int; - pub fn strncmp(cs: *const c_char, ct: *const c_char, n: size_t) -> c_int; - pub fn strcoll(cs: *const c_char, ct: *const c_char) -> c_int; - pub fn strchr(cs: *const c_char, c: c_int) -> *mut c_char; - pub fn strrchr(cs: *const c_char, c: c_int) -> *mut c_char; - pub fn strspn(cs: *const c_char, ct: *const c_char) -> size_t; - pub fn strcspn(cs: *const c_char, ct: *const c_char) -> size_t; - pub fn strdup(cs: *const c_char) -> *mut c_char; - pub fn strpbrk(cs: *const c_char, ct: *const c_char) -> *mut c_char; - pub fn strstr(cs: *const c_char, ct: *const c_char) -> *mut c_char; - pub fn strlen(cs: *const c_char) -> size_t; - pub fn strnlen(cs: *const c_char, maxlen: size_t) -> size_t; - #[cfg_attr(all(target_os = "macos", target_arch = "x86"), - link_name = "strerror$UNIX2003")] - pub fn strerror(n: c_int) -> *mut c_char; - pub fn strtok(s: *mut c_char, t: *const c_char) -> *mut c_char; - pub fn strxfrm(s: *mut c_char, ct: *const c_char, n: size_t) -> size_t; - pub fn wcslen(buf: *const wchar_t) -> size_t; - - pub fn memchr(cx: *const c_void, c: c_int, n: size_t) -> *mut c_void; - pub fn memcmp(cx: *const c_void, ct: *const c_void, n: size_t) -> c_int; - pub fn memcpy(dest: *mut c_void, src: *const c_void, n: size_t) -> *mut c_void; - pub fn memmove(dest: *mut c_void, src: *const c_void, n: size_t) -> *mut c_void; - pub fn 
memset(dest: *mut c_void, c: c_int, n: size_t) -> *mut c_void; -} - -// These are all inline functions on android, so they end up just being entirely -// missing on that platform. -#[cfg(not(target_os = "android"))] -extern { - pub fn abs(i: c_int) -> c_int; - pub fn atof(s: *const c_char) -> c_double; - pub fn labs(i: c_long) -> c_long; - pub fn rand() -> c_int; - pub fn srand(seed: c_uint); -} - -cfg_if! { - if #[cfg(windows)] { - mod windows; - pub use windows::*; - } else if #[cfg(target_os = "redox")] { - mod redox; - pub use redox::*; - } else if #[cfg(unix)] { - mod unix; - pub use unix::*; - } else { - // Unknown target_family - } -} diff -Nru cargo-0.17.0/vendor/libc-0.2.18/src/macros.rs cargo-0.19.0/vendor/libc-0.2.18/src/macros.rs --- cargo-0.17.0/vendor/libc-0.2.18/src/macros.rs 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/libc-0.2.18/src/macros.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,108 +0,0 @@ -/// A macro for defining #[cfg] if-else statements. -/// -/// This is similar to the `if/elif` C preprocessor macro by allowing definition -/// of a cascade of `#[cfg]` cases, emitting the implementation which matches -/// first. -/// -/// This allows you to conveniently provide a long list #[cfg]'d blocks of code -/// without having to rewrite each clause multiple times. -macro_rules! cfg_if { - ($( - if #[cfg($($meta:meta),*)] { $($it:item)* } - ) else * else { - $($it2:item)* - }) => { - __cfg_if_items! { - () ; - $( ( ($($meta),*) ($($it)*) ), )* - ( () ($($it2)*) ), - } - } -} - -macro_rules! __cfg_if_items { - (($($not:meta,)*) ; ) => {}; - (($($not:meta,)*) ; ( ($($m:meta),*) ($($it:item)*) ), $($rest:tt)*) => { - __cfg_if_apply! { cfg(all(not(any($($not),*)), $($m,)*)), $($it)* } - __cfg_if_items! { ($($not,)* $($m,)*) ; $($rest)* } - } -} - -macro_rules! __cfg_if_apply { - ($m:meta, $($it:item)*) => { - $(#[$m] $it)* - } -} - -macro_rules! s { - ($($(#[$attr:meta])* pub struct $i:ident { $($field:tt)* })*) => ($( - __item! 
{ - #[repr(C)] - $(#[$attr])* - pub struct $i { $($field)* } - } - impl ::dox::Copy for $i {} - impl ::dox::Clone for $i { - fn clone(&self) -> $i { *self } - } - )*) -} - -macro_rules! f { - ($(pub fn $i:ident($($arg:ident: $argty:ty),*) -> $ret:ty { - $($body:stmt);* - })*) => ($( - #[inline] - #[cfg(not(dox))] - pub unsafe extern fn $i($($arg: $argty),*) -> $ret { - $($body);* - } - - #[cfg(dox)] - #[allow(dead_code)] - pub unsafe extern fn $i($($arg: $argty),*) -> $ret { - loop {} - } - )*) -} - -macro_rules! __item { - ($i:item) => ($i) -} - -#[cfg(test)] -mod tests { - cfg_if! { - if #[cfg(test)] { - use std::option::Option as Option2; - fn works1() -> Option2 { Some(1) } - } else { - fn works1() -> Option { None } - } - } - - cfg_if! { - if #[cfg(foo)] { - fn works2() -> bool { false } - } else if #[cfg(test)] { - fn works2() -> bool { true } - } else { - fn works2() -> bool { false } - } - } - - cfg_if! { - if #[cfg(foo)] { - fn works3() -> bool { false } - } else { - fn works3() -> bool { true } - } - } - - #[test] - fn it_works() { - assert!(works1().is_some()); - assert!(works2()); - assert!(works3()); - } -} diff -Nru cargo-0.17.0/vendor/libc-0.2.18/src/redox.rs cargo-0.19.0/vendor/libc-0.2.18/src/redox.rs --- cargo-0.17.0/vendor/libc-0.2.18/src/redox.rs 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/libc-0.2.18/src/redox.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,49 +0,0 @@ -pub type c_char = i8; -pub type c_long = i64; -pub type c_ulong = u64; - -pub type wchar_t = i16; - -pub type off_t = usize; -pub type mode_t = u16; -pub type time_t = i64; -pub type pid_t = usize; -pub type gid_t = usize; -pub type uid_t = usize; - -pub type in_addr_t = u32; -pub type in_port_t = u16; - -pub type socklen_t = u32; -pub type sa_family_t = u16; - -s! 
{ - pub struct in_addr { - pub s_addr: in_addr_t, - } - - pub struct in6_addr { - pub s6_addr: [u8; 16], - __align: [u32; 0], - } - - pub struct sockaddr { - pub sa_family: sa_family_t, - pub sa_data: [::c_char; 14], - } - - pub struct sockaddr_in { - pub sin_family: sa_family_t, - pub sin_port: ::in_port_t, - pub sin_addr: ::in_addr, - pub sin_zero: [u8; 8], - } - - pub struct sockaddr_in6 { - pub sin6_family: sa_family_t, - pub sin6_port: in_port_t, - pub sin6_flowinfo: u32, - pub sin6_addr: ::in6_addr, - pub sin6_scope_id: u32, - } -} diff -Nru cargo-0.17.0/vendor/libc-0.2.18/src/unix/bsd/apple/b32.rs cargo-0.19.0/vendor/libc-0.2.18/src/unix/bsd/apple/b32.rs --- cargo-0.17.0/vendor/libc-0.2.18/src/unix/bsd/apple/b32.rs 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/libc-0.2.18/src/unix/bsd/apple/b32.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,19 +0,0 @@ -//! 32-bit specific Apple (ios/darwin) definitions - -pub type c_long = i32; -pub type c_ulong = u32; - -s! { - pub struct pthread_attr_t { - __sig: c_long, - __opaque: [::c_char; 36] - } -} - -pub const __PTHREAD_MUTEX_SIZE__: usize = 40; -pub const __PTHREAD_COND_SIZE__: usize = 24; -pub const __PTHREAD_CONDATTR_SIZE__: usize = 4; -pub const __PTHREAD_RWLOCK_SIZE__: usize = 124; - -pub const TIOCTIMESTAMP: ::c_ulong = 0x40087459; -pub const TIOCDCDTIMESTAMP: ::c_ulong = 0x40087458; diff -Nru cargo-0.17.0/vendor/libc-0.2.18/src/unix/bsd/apple/b64.rs cargo-0.19.0/vendor/libc-0.2.18/src/unix/bsd/apple/b64.rs --- cargo-0.17.0/vendor/libc-0.2.18/src/unix/bsd/apple/b64.rs 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/libc-0.2.18/src/unix/bsd/apple/b64.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,19 +0,0 @@ -//! 64-bit specific Apple (ios/darwin) definitions - -pub type c_long = i64; -pub type c_ulong = u64; - -s! 
{ - pub struct pthread_attr_t { - __sig: c_long, - __opaque: [::c_char; 56] - } -} - -pub const __PTHREAD_MUTEX_SIZE__: usize = 56; -pub const __PTHREAD_COND_SIZE__: usize = 40; -pub const __PTHREAD_CONDATTR_SIZE__: usize = 8; -pub const __PTHREAD_RWLOCK_SIZE__: usize = 192; - -pub const TIOCTIMESTAMP: ::c_ulong = 0x40107459; -pub const TIOCDCDTIMESTAMP: ::c_ulong = 0x40107458; diff -Nru cargo-0.17.0/vendor/libc-0.2.18/src/unix/bsd/apple/mod.rs cargo-0.19.0/vendor/libc-0.2.18/src/unix/bsd/apple/mod.rs --- cargo-0.17.0/vendor/libc-0.2.18/src/unix/bsd/apple/mod.rs 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/libc-0.2.18/src/unix/bsd/apple/mod.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,1545 +0,0 @@ -//! Apple (ios/darwin)-specific definitions -//! -//! This covers *-apple-* triples currently - -pub type clock_t = c_ulong; -pub type time_t = c_long; -pub type suseconds_t = i32; -pub type dev_t = i32; -pub type ino_t = u64; -pub type mode_t = u16; -pub type nlink_t = u16; -pub type blksize_t = i32; -pub type rlim_t = u64; -pub type mach_timebase_info_data_t = mach_timebase_info; -pub type pthread_key_t = c_ulong; -pub type sigset_t = u32; -pub type fsblkcnt_t = ::c_uint; -pub type fsfilcnt_t = ::c_uint; -pub type speed_t = ::c_ulong; -pub type tcflag_t = ::c_ulong; -pub type nl_item = ::c_int; -pub type id_t = ::c_uint; -pub type sem_t = ::c_int; - -pub enum timezone {} - -s! 
{ - pub struct aiocb { - pub aio_fildes: ::c_int, - pub aio_offset: ::off_t, - pub aio_buf: *mut ::c_void, - pub aio_nbytes: ::size_t, - pub aio_reqprio: ::c_int, - pub aio_sigevent: sigevent, - pub aio_lio_opcode: ::c_int - } - - pub struct utmpx { - pub ut_user: [::c_char; _UTX_USERSIZE], - pub ut_id: [::c_char; _UTX_IDSIZE], - pub ut_line: [::c_char; _UTX_LINESIZE], - pub ut_pid: ::pid_t, - pub ut_type: ::c_short, - pub ut_tv: ::timeval, - pub ut_host: [::c_char; _UTX_HOSTSIZE], - ut_pad: [::uint32_t; 16], - } - - pub struct glob_t { - pub gl_pathc: ::size_t, - __unused1: ::c_int, - pub gl_offs: ::size_t, - __unused2: ::c_int, - pub gl_pathv: *mut *mut ::c_char, - - __unused3: *mut ::c_void, - - __unused4: *mut ::c_void, - __unused5: *mut ::c_void, - __unused6: *mut ::c_void, - __unused7: *mut ::c_void, - __unused8: *mut ::c_void, - } - - pub struct sockaddr_storage { - pub ss_len: u8, - pub ss_family: ::sa_family_t, - __ss_pad1: [u8; 6], - __ss_align: i64, - __ss_pad2: [u8; 112], - } - - pub struct addrinfo { - pub ai_flags: ::c_int, - pub ai_family: ::c_int, - pub ai_socktype: ::c_int, - pub ai_protocol: ::c_int, - pub ai_addrlen: ::socklen_t, - pub ai_canonname: *mut ::c_char, - pub ai_addr: *mut ::sockaddr, - pub ai_next: *mut addrinfo, - } - - pub struct mach_timebase_info { - pub numer: u32, - pub denom: u32, - } - - pub struct stat { - pub st_dev: dev_t, - pub st_mode: mode_t, - pub st_nlink: nlink_t, - pub st_ino: ino_t, - pub st_uid: ::uid_t, - pub st_gid: ::gid_t, - pub st_rdev: dev_t, - pub st_atime: time_t, - pub st_atime_nsec: c_long, - pub st_mtime: time_t, - pub st_mtime_nsec: c_long, - pub st_ctime: time_t, - pub st_ctime_nsec: c_long, - pub st_birthtime: time_t, - pub st_birthtime_nsec: c_long, - pub st_size: ::off_t, - pub st_blocks: ::blkcnt_t, - pub st_blksize: blksize_t, - pub st_flags: ::uint32_t, - pub st_gen: ::uint32_t, - pub st_lspare: ::int32_t, - pub st_qspare: [::int64_t; 2], - } - - pub struct dirent { - pub d_ino: u64, - pub 
d_seekoff: u64, - pub d_reclen: u16, - pub d_namlen: u16, - pub d_type: u8, - pub d_name: [::c_char; 1024], - } - - pub struct pthread_mutex_t { - __sig: ::c_long, - __opaque: [u8; __PTHREAD_MUTEX_SIZE__], - } - - pub struct pthread_mutexattr_t { - __sig: ::c_long, - __opaque: [u8; 8], - } - - pub struct pthread_cond_t { - __sig: ::c_long, - __opaque: [u8; __PTHREAD_COND_SIZE__], - } - - pub struct pthread_condattr_t { - __sig: ::c_long, - __opaque: [u8; __PTHREAD_CONDATTR_SIZE__], - } - - pub struct pthread_rwlock_t { - __sig: ::c_long, - __opaque: [u8; __PTHREAD_RWLOCK_SIZE__], - } - - pub struct siginfo_t { - pub si_signo: ::c_int, - pub si_errno: ::c_int, - pub si_code: ::c_int, - pub si_pid: ::pid_t, - pub si_uid: ::uid_t, - pub si_status: ::c_int, - pub si_addr: *mut ::c_void, - _pad: [usize; 9], - } - - pub struct sigaction { - pub sa_sigaction: ::sighandler_t, - pub sa_mask: sigset_t, - pub sa_flags: ::c_int, - } - - pub struct stack_t { - pub ss_sp: *mut ::c_void, - pub ss_size: ::size_t, - pub ss_flags: ::c_int, - } - - pub struct fstore_t { - pub fst_flags: ::c_uint, - pub fst_posmode: ::c_int, - pub fst_offset: ::off_t, - pub fst_length: ::off_t, - pub fst_bytesalloc: ::off_t, - } - - pub struct radvisory { - pub ra_offset: ::off_t, - pub ra_count: ::c_int, - } - - pub struct statvfs { - pub f_bsize: ::c_ulong, - pub f_frsize: ::c_ulong, - pub f_blocks: ::fsblkcnt_t, - pub f_bfree: ::fsblkcnt_t, - pub f_bavail: ::fsblkcnt_t, - pub f_files: ::fsfilcnt_t, - pub f_ffree: ::fsfilcnt_t, - pub f_favail: ::fsfilcnt_t, - pub f_fsid: ::c_ulong, - pub f_flag: ::c_ulong, - pub f_namemax: ::c_ulong, - } - - pub struct Dl_info { - pub dli_fname: *const ::c_char, - pub dli_fbase: *mut ::c_void, - pub dli_sname: *const ::c_char, - pub dli_saddr: *mut ::c_void, - } - - pub struct sockaddr_in { - pub sin_len: u8, - pub sin_family: ::sa_family_t, - pub sin_port: ::in_port_t, - pub sin_addr: ::in_addr, - pub sin_zero: [::c_char; 8], - } - - pub struct statfs { - pub 
f_bsize: ::uint32_t, - pub f_iosize: ::int32_t, - pub f_blocks: ::uint64_t, - pub f_bfree: ::uint64_t, - pub f_bavail: ::uint64_t, - pub f_files: ::uint64_t, - pub f_ffree: ::uint64_t, - pub f_fsid: ::fsid_t, - pub f_owner: ::uid_t, - pub f_type: ::uint32_t, - pub f_flags: ::uint32_t, - pub f_fssubtype: ::uint32_t, - pub f_fstypename: [::c_char; 16], - pub f_mntonname: [::c_char; 1024], - pub f_mntfromname: [::c_char; 1024], - pub f_reserved: [::uint32_t; 8], - } - - // FIXME: this should have align 4 but it's got align 8 on 64-bit - pub struct kevent { - pub ident: ::uintptr_t, - pub filter: ::int16_t, - pub flags: ::uint16_t, - pub fflags: ::uint32_t, - pub data: ::intptr_t, - pub udata: *mut ::c_void, - } - - pub struct kevent64_s { - pub ident: ::uint64_t, - pub filter: ::int16_t, - pub flags: ::uint16_t, - pub fflags: ::uint32_t, - pub data: ::int64_t, - pub udata: ::uint64_t, - pub ext: [::uint64_t; 2], - } - - pub struct dqblk { - pub dqb_bhardlimit: ::uint64_t, - pub dqb_bsoftlimit: ::uint64_t, - pub dqb_curbytes: ::uint64_t, - pub dqb_ihardlimit: ::uint32_t, - pub dqb_isoftlimit: ::uint32_t, - pub dqb_curinodes: ::uint32_t, - pub dqb_btime: ::uint32_t, - pub dqb_itime: ::uint32_t, - pub dqb_id: ::uint32_t, - pub dqb_spare: [::uint32_t; 4], - } - - pub struct termios { - pub c_iflag: ::tcflag_t, - pub c_oflag: ::tcflag_t, - pub c_cflag: ::tcflag_t, - pub c_lflag: ::tcflag_t, - pub c_cc: [::cc_t; ::NCCS], - pub c_ispeed: ::speed_t, - pub c_ospeed: ::speed_t, - } - - pub struct flock { - pub l_start: ::off_t, - pub l_len: ::off_t, - pub l_pid: ::pid_t, - pub l_type: ::c_short, - pub l_whence: ::c_short, - } - - pub struct sf_hdtr { - pub headers: *mut ::iovec, - pub hdr_cnt: ::c_int, - pub trailers: *mut ::iovec, - pub trl_cnt: ::c_int, - } - - pub struct lconv { - pub decimal_point: *mut ::c_char, - pub thousands_sep: *mut ::c_char, - pub grouping: *mut ::c_char, - pub int_curr_symbol: *mut ::c_char, - pub currency_symbol: *mut ::c_char, - pub 
mon_decimal_point: *mut ::c_char, - pub mon_thousands_sep: *mut ::c_char, - pub mon_grouping: *mut ::c_char, - pub positive_sign: *mut ::c_char, - pub negative_sign: *mut ::c_char, - pub int_frac_digits: ::c_char, - pub frac_digits: ::c_char, - pub p_cs_precedes: ::c_char, - pub p_sep_by_space: ::c_char, - pub n_cs_precedes: ::c_char, - pub n_sep_by_space: ::c_char, - pub p_sign_posn: ::c_char, - pub n_sign_posn: ::c_char, - pub int_p_cs_precedes: ::c_char, - pub int_n_cs_precedes: ::c_char, - pub int_p_sep_by_space: ::c_char, - pub int_n_sep_by_space: ::c_char, - pub int_p_sign_posn: ::c_char, - pub int_n_sign_posn: ::c_char, - } - - pub struct sigevent { - pub sigev_notify: ::c_int, - pub sigev_signo: ::c_int, - pub sigev_value: ::sigval, - __unused1: *mut ::c_void, //actually a function pointer - pub sigev_notify_attributes: *mut ::pthread_attr_t - } -} - -pub const _UTX_USERSIZE: usize = 256; -pub const _UTX_LINESIZE: usize = 32; -pub const _UTX_IDSIZE: usize = 4; -pub const _UTX_HOSTSIZE: usize = 256; - -pub const EMPTY: ::c_short = 0; -pub const RUN_LVL: ::c_short = 1; -pub const BOOT_TIME: ::c_short = 2; -pub const OLD_TIME: ::c_short = 3; -pub const NEW_TIME: ::c_short = 4; -pub const INIT_PROCESS: ::c_short = 5; -pub const LOGIN_PROCESS: ::c_short = 6; -pub const USER_PROCESS: ::c_short = 7; -pub const DEAD_PROCESS: ::c_short = 8; -pub const ACCOUNTING: ::c_short = 9; -pub const SIGNATURE: ::c_short = 10; -pub const SHUTDOWN_TIME: ::c_short = 11; - -pub const LC_COLLATE_MASK: ::c_int = (1 << 0); -pub const LC_CTYPE_MASK: ::c_int = (1 << 1); -pub const LC_MESSAGES_MASK: ::c_int = (1 << 2); -pub const LC_MONETARY_MASK: ::c_int = (1 << 3); -pub const LC_NUMERIC_MASK: ::c_int = (1 << 4); -pub const LC_TIME_MASK: ::c_int = (1 << 5); -pub const LC_ALL_MASK: ::c_int = LC_COLLATE_MASK - | LC_CTYPE_MASK - | LC_MESSAGES_MASK - | LC_MONETARY_MASK - | LC_NUMERIC_MASK - | LC_TIME_MASK; - -pub const CODESET: ::nl_item = 0; -pub const D_T_FMT: ::nl_item = 1; -pub const 
D_FMT: ::nl_item = 2; -pub const T_FMT: ::nl_item = 3; -pub const T_FMT_AMPM: ::nl_item = 4; -pub const AM_STR: ::nl_item = 5; -pub const PM_STR: ::nl_item = 6; - -pub const DAY_1: ::nl_item = 7; -pub const DAY_2: ::nl_item = 8; -pub const DAY_3: ::nl_item = 9; -pub const DAY_4: ::nl_item = 10; -pub const DAY_5: ::nl_item = 11; -pub const DAY_6: ::nl_item = 12; -pub const DAY_7: ::nl_item = 13; - -pub const ABDAY_1: ::nl_item = 14; -pub const ABDAY_2: ::nl_item = 15; -pub const ABDAY_3: ::nl_item = 16; -pub const ABDAY_4: ::nl_item = 17; -pub const ABDAY_5: ::nl_item = 18; -pub const ABDAY_6: ::nl_item = 19; -pub const ABDAY_7: ::nl_item = 20; - -pub const MON_1: ::nl_item = 21; -pub const MON_2: ::nl_item = 22; -pub const MON_3: ::nl_item = 23; -pub const MON_4: ::nl_item = 24; -pub const MON_5: ::nl_item = 25; -pub const MON_6: ::nl_item = 26; -pub const MON_7: ::nl_item = 27; -pub const MON_8: ::nl_item = 28; -pub const MON_9: ::nl_item = 29; -pub const MON_10: ::nl_item = 30; -pub const MON_11: ::nl_item = 31; -pub const MON_12: ::nl_item = 32; - -pub const ABMON_1: ::nl_item = 33; -pub const ABMON_2: ::nl_item = 34; -pub const ABMON_3: ::nl_item = 35; -pub const ABMON_4: ::nl_item = 36; -pub const ABMON_5: ::nl_item = 37; -pub const ABMON_6: ::nl_item = 38; -pub const ABMON_7: ::nl_item = 39; -pub const ABMON_8: ::nl_item = 40; -pub const ABMON_9: ::nl_item = 41; -pub const ABMON_10: ::nl_item = 42; -pub const ABMON_11: ::nl_item = 43; -pub const ABMON_12: ::nl_item = 44; - -pub const ERA: ::nl_item = 45; -pub const ERA_D_FMT: ::nl_item = 46; -pub const ERA_D_T_FMT: ::nl_item = 47; -pub const ERA_T_FMT: ::nl_item = 48; -pub const ALT_DIGITS: ::nl_item = 49; - -pub const RADIXCHAR: ::nl_item = 50; -pub const THOUSEP: ::nl_item = 51; - -pub const YESEXPR: ::nl_item = 52; -pub const NOEXPR: ::nl_item = 53; - -pub const YESSTR: ::nl_item = 54; -pub const NOSTR: ::nl_item = 55; - -pub const CRNCYSTR: ::nl_item = 56; - -pub const D_MD_ORDER: ::nl_item = 57; - -pub 
const EXIT_FAILURE: ::c_int = 1; -pub const EXIT_SUCCESS: ::c_int = 0; -pub const RAND_MAX: ::c_int = 2147483647; -pub const EOF: ::c_int = -1; -pub const SEEK_SET: ::c_int = 0; -pub const SEEK_CUR: ::c_int = 1; -pub const SEEK_END: ::c_int = 2; -pub const _IOFBF: ::c_int = 0; -pub const _IONBF: ::c_int = 2; -pub const _IOLBF: ::c_int = 1; -pub const BUFSIZ: ::c_uint = 1024; -pub const FOPEN_MAX: ::c_uint = 20; -pub const FILENAME_MAX: ::c_uint = 1024; -pub const L_tmpnam: ::c_uint = 1024; -pub const TMP_MAX: ::c_uint = 308915776; -pub const _PC_LINK_MAX: ::c_int = 1; -pub const _PC_MAX_CANON: ::c_int = 2; -pub const _PC_MAX_INPUT: ::c_int = 3; -pub const _PC_NAME_MAX: ::c_int = 4; -pub const _PC_PATH_MAX: ::c_int = 5; -pub const _PC_PIPE_BUF: ::c_int = 6; -pub const _PC_CHOWN_RESTRICTED: ::c_int = 7; -pub const _PC_NO_TRUNC: ::c_int = 8; -pub const _PC_VDISABLE: ::c_int = 9; -pub const O_RDONLY: ::c_int = 0; -pub const O_WRONLY: ::c_int = 1; -pub const O_RDWR: ::c_int = 2; -pub const O_APPEND: ::c_int = 8; -pub const O_CREAT: ::c_int = 512; -pub const O_EXCL: ::c_int = 2048; -pub const O_NOCTTY: ::c_int = 131072; -pub const O_TRUNC: ::c_int = 1024; -pub const O_CLOEXEC: ::c_int = 0x1000000; -pub const O_DIRECTORY: ::c_int = 0x100000; -pub const S_IFIFO: mode_t = 4096; -pub const S_IFCHR: mode_t = 8192; -pub const S_IFBLK: mode_t = 24576; -pub const S_IFDIR: mode_t = 16384; -pub const S_IFREG: mode_t = 32768; -pub const S_IFLNK: mode_t = 40960; -pub const S_IFSOCK: mode_t = 49152; -pub const S_IFMT: mode_t = 61440; -pub const S_IEXEC: mode_t = 64; -pub const S_IWRITE: mode_t = 128; -pub const S_IREAD: mode_t = 256; -pub const S_IRWXU: mode_t = 448; -pub const S_IXUSR: mode_t = 64; -pub const S_IWUSR: mode_t = 128; -pub const S_IRUSR: mode_t = 256; -pub const S_IRWXG: mode_t = 56; -pub const S_IXGRP: mode_t = 8; -pub const S_IWGRP: mode_t = 16; -pub const S_IRGRP: mode_t = 32; -pub const S_IRWXO: mode_t = 7; -pub const S_IXOTH: mode_t = 1; -pub const S_IWOTH: mode_t 
= 2; -pub const S_IROTH: mode_t = 4; -pub const F_OK: ::c_int = 0; -pub const R_OK: ::c_int = 4; -pub const W_OK: ::c_int = 2; -pub const X_OK: ::c_int = 1; -pub const STDIN_FILENO: ::c_int = 0; -pub const STDOUT_FILENO: ::c_int = 1; -pub const STDERR_FILENO: ::c_int = 2; -pub const F_LOCK: ::c_int = 1; -pub const F_TEST: ::c_int = 3; -pub const F_TLOCK: ::c_int = 2; -pub const F_ULOCK: ::c_int = 0; -pub const F_GETLK: ::c_int = 7; -pub const F_SETLK: ::c_int = 8; -pub const F_SETLKW: ::c_int = 9; -pub const SIGHUP: ::c_int = 1; -pub const SIGINT: ::c_int = 2; -pub const SIGQUIT: ::c_int = 3; -pub const SIGILL: ::c_int = 4; -pub const SIGABRT: ::c_int = 6; -pub const SIGEMT: ::c_int = 7; -pub const SIGFPE: ::c_int = 8; -pub const SIGKILL: ::c_int = 9; -pub const SIGSEGV: ::c_int = 11; -pub const SIGPIPE: ::c_int = 13; -pub const SIGALRM: ::c_int = 14; -pub const SIGTERM: ::c_int = 15; - -pub const PROT_NONE: ::c_int = 0; -pub const PROT_READ: ::c_int = 1; -pub const PROT_WRITE: ::c_int = 2; -pub const PROT_EXEC: ::c_int = 4; - -pub const MAP_FILE: ::c_int = 0x0000; -pub const MAP_SHARED: ::c_int = 0x0001; -pub const MAP_PRIVATE: ::c_int = 0x0002; -pub const MAP_FIXED: ::c_int = 0x0010; -pub const MAP_ANON: ::c_int = 0x1000; - -pub const MAP_FAILED: *mut ::c_void = !0 as *mut ::c_void; - -pub const MCL_CURRENT: ::c_int = 0x0001; -pub const MCL_FUTURE: ::c_int = 0x0002; - -pub const MS_ASYNC: ::c_int = 0x0001; -pub const MS_INVALIDATE: ::c_int = 0x0002; -pub const MS_SYNC: ::c_int = 0x0010; - -pub const MS_KILLPAGES: ::c_int = 0x0004; -pub const MS_DEACTIVATE: ::c_int = 0x0008; - -pub const EPERM: ::c_int = 1; -pub const ENOENT: ::c_int = 2; -pub const ESRCH: ::c_int = 3; -pub const EINTR: ::c_int = 4; -pub const EIO: ::c_int = 5; -pub const ENXIO: ::c_int = 6; -pub const E2BIG: ::c_int = 7; -pub const ENOEXEC: ::c_int = 8; -pub const EBADF: ::c_int = 9; -pub const ECHILD: ::c_int = 10; -pub const EDEADLK: ::c_int = 11; -pub const ENOMEM: ::c_int = 12; -pub const 
EACCES: ::c_int = 13; -pub const EFAULT: ::c_int = 14; -pub const ENOTBLK: ::c_int = 15; -pub const EBUSY: ::c_int = 16; -pub const EEXIST: ::c_int = 17; -pub const EXDEV: ::c_int = 18; -pub const ENODEV: ::c_int = 19; -pub const ENOTDIR: ::c_int = 20; -pub const EISDIR: ::c_int = 21; -pub const EINVAL: ::c_int = 22; -pub const ENFILE: ::c_int = 23; -pub const EMFILE: ::c_int = 24; -pub const ENOTTY: ::c_int = 25; -pub const ETXTBSY: ::c_int = 26; -pub const EFBIG: ::c_int = 27; -pub const ENOSPC: ::c_int = 28; -pub const ESPIPE: ::c_int = 29; -pub const EROFS: ::c_int = 30; -pub const EMLINK: ::c_int = 31; -pub const EPIPE: ::c_int = 32; -pub const EDOM: ::c_int = 33; -pub const ERANGE: ::c_int = 34; -pub const EAGAIN: ::c_int = 35; -pub const EWOULDBLOCK: ::c_int = EAGAIN; -pub const EINPROGRESS: ::c_int = 36; -pub const EALREADY: ::c_int = 37; -pub const ENOTSOCK: ::c_int = 38; -pub const EDESTADDRREQ: ::c_int = 39; -pub const EMSGSIZE: ::c_int = 40; -pub const EPROTOTYPE: ::c_int = 41; -pub const ENOPROTOOPT: ::c_int = 42; -pub const EPROTONOSUPPORT: ::c_int = 43; -pub const ESOCKTNOSUPPORT: ::c_int = 44; -pub const ENOTSUP: ::c_int = 45; -pub const EPFNOSUPPORT: ::c_int = 46; -pub const EAFNOSUPPORT: ::c_int = 47; -pub const EADDRINUSE: ::c_int = 48; -pub const EADDRNOTAVAIL: ::c_int = 49; -pub const ENETDOWN: ::c_int = 50; -pub const ENETUNREACH: ::c_int = 51; -pub const ENETRESET: ::c_int = 52; -pub const ECONNABORTED: ::c_int = 53; -pub const ECONNRESET: ::c_int = 54; -pub const ENOBUFS: ::c_int = 55; -pub const EISCONN: ::c_int = 56; -pub const ENOTCONN: ::c_int = 57; -pub const ESHUTDOWN: ::c_int = 58; -pub const ETOOMANYREFS: ::c_int = 59; -pub const ETIMEDOUT: ::c_int = 60; -pub const ECONNREFUSED: ::c_int = 61; -pub const ELOOP: ::c_int = 62; -pub const ENAMETOOLONG: ::c_int = 63; -pub const EHOSTDOWN: ::c_int = 64; -pub const EHOSTUNREACH: ::c_int = 65; -pub const ENOTEMPTY: ::c_int = 66; -pub const EPROCLIM: ::c_int = 67; -pub const EUSERS: ::c_int = 
68; -pub const EDQUOT: ::c_int = 69; -pub const ESTALE: ::c_int = 70; -pub const EREMOTE: ::c_int = 71; -pub const EBADRPC: ::c_int = 72; -pub const ERPCMISMATCH: ::c_int = 73; -pub const EPROGUNAVAIL: ::c_int = 74; -pub const EPROGMISMATCH: ::c_int = 75; -pub const EPROCUNAVAIL: ::c_int = 76; -pub const ENOLCK: ::c_int = 77; -pub const ENOSYS: ::c_int = 78; -pub const EFTYPE: ::c_int = 79; -pub const EAUTH: ::c_int = 80; -pub const ENEEDAUTH: ::c_int = 81; -pub const EPWROFF: ::c_int = 82; -pub const EDEVERR: ::c_int = 83; -pub const EOVERFLOW: ::c_int = 84; -pub const EBADEXEC: ::c_int = 85; -pub const EBADARCH: ::c_int = 86; -pub const ESHLIBVERS: ::c_int = 87; -pub const EBADMACHO: ::c_int = 88; -pub const ECANCELED: ::c_int = 89; -pub const EIDRM: ::c_int = 90; -pub const ENOMSG: ::c_int = 91; -pub const EILSEQ: ::c_int = 92; -pub const ENOATTR: ::c_int = 93; -pub const EBADMSG: ::c_int = 94; -pub const EMULTIHOP: ::c_int = 95; -pub const ENODATA: ::c_int = 96; -pub const ENOLINK: ::c_int = 97; -pub const ENOSR: ::c_int = 98; -pub const ENOSTR: ::c_int = 99; -pub const EPROTO: ::c_int = 100; -pub const ETIME: ::c_int = 101; -pub const EOPNOTSUPP: ::c_int = 102; -pub const ENOPOLICY: ::c_int = 103; -pub const ENOTRECOVERABLE: ::c_int = 104; -pub const EOWNERDEAD: ::c_int = 105; -pub const EQFULL: ::c_int = 106; -pub const ELAST: ::c_int = 106; - -pub const EAI_SYSTEM: ::c_int = 11; - -pub const F_DUPFD: ::c_int = 0; -pub const F_DUPFD_CLOEXEC: ::c_int = 67; -pub const F_GETFD: ::c_int = 1; -pub const F_SETFD: ::c_int = 2; -pub const F_GETFL: ::c_int = 3; -pub const F_SETFL: ::c_int = 4; -pub const F_PREALLOCATE: ::c_int = 42; -pub const F_RDADVISE: ::c_int = 44; -pub const F_RDAHEAD: ::c_int = 45; -pub const F_NOCACHE: ::c_int = 48; -pub const F_GETPATH: ::c_int = 50; -pub const F_FULLFSYNC: ::c_int = 51; -pub const F_FREEZE_FS: ::c_int = 53; -pub const F_THAW_FS: ::c_int = 54; -pub const F_GLOBAL_NOCACHE: ::c_int = 55; -pub const F_NODIRECT: ::c_int = 62; - 
-pub const F_ALLOCATECONTIG: ::c_uint = 0x02; -pub const F_ALLOCATEALL: ::c_uint = 0x04; - -pub const F_PEOFPOSMODE: ::c_int = 3; -pub const F_VOLPOSMODE: ::c_int = 4; - -pub const O_ACCMODE: ::c_int = 3; - -pub const TIOCMODG: ::c_ulong = 0x40047403; -pub const TIOCMODS: ::c_ulong = 0x80047404; -pub const TIOCM_LE: ::c_int = 0x1; -pub const TIOCM_DTR: ::c_int = 0x2; -pub const TIOCM_RTS: ::c_int = 0x4; -pub const TIOCM_ST: ::c_int = 0x8; -pub const TIOCM_SR: ::c_int = 0x10; -pub const TIOCM_CTS: ::c_int = 0x20; -pub const TIOCM_CAR: ::c_int = 0x40; -pub const TIOCM_CD: ::c_int = 0x40; -pub const TIOCM_RNG: ::c_int = 0x80; -pub const TIOCM_RI: ::c_int = 0x80; -pub const TIOCM_DSR: ::c_int = 0x100; -pub const TIOCEXCL: ::c_int = 0x2000740d; -pub const TIOCNXCL: ::c_int = 0x2000740e; -pub const TIOCFLUSH: ::c_ulong = 0x80047410; -pub const TIOCGETD: ::c_ulong = 0x4004741a; -pub const TIOCSETD: ::c_ulong = 0x8004741b; -pub const TIOCIXON: ::c_uint = 0x20007481; -pub const TIOCIXOFF: ::c_uint = 0x20007480; -pub const TIOCSBRK: ::c_uint = 0x2000747b; -pub const TIOCCBRK: ::c_uint = 0x2000747a; -pub const TIOCSDTR: ::c_uint = 0x20007479; -pub const TIOCCDTR: ::c_uint = 0x20007478; -pub const TIOCGPGRP: ::c_ulong = 0x40047477; -pub const TIOCSPGRP: ::c_ulong = 0x80047476; -pub const TIOCOUTQ: ::c_ulong = 0x40047473; -pub const TIOCSTI: ::c_ulong = 0x80017472; -pub const TIOCNOTTY: ::c_uint = 0x20007471; -pub const TIOCPKT: ::c_ulong = 0x80047470; -pub const TIOCPKT_DATA: ::c_int = 0x0; -pub const TIOCPKT_FLUSHREAD: ::c_int = 0x1; -pub const TIOCPKT_FLUSHWRITE: ::c_int = 0x2; -pub const TIOCPKT_STOP: ::c_int = 0x4; -pub const TIOCPKT_START: ::c_int = 0x8; -pub const TIOCPKT_NOSTOP: ::c_int = 0x10; -pub const TIOCPKT_DOSTOP: ::c_int = 0x20; -pub const TIOCPKT_IOCTL: ::c_int = 0x40; -pub const TIOCSTOP: ::c_uint = 0x2000746f; -pub const TIOCSTART: ::c_uint = 0x2000746e; -pub const TIOCMSET: ::c_ulong = 0x8004746d; -pub const TIOCMBIS: ::c_ulong = 0x8004746c; -pub const 
TIOCMBIC: ::c_ulong = 0x8004746b; -pub const TIOCMGET: ::c_ulong = 0x4004746a; -pub const TIOCREMOTE: ::c_ulong = 0x80047469; -pub const TIOCGWINSZ: ::c_ulong = 0x40087468; -pub const TIOCSWINSZ: ::c_ulong = 0x80087467; -pub const TIOCUCNTL: ::c_ulong = 0x80047466; -pub const TIOCSTAT: ::c_uint = 0x20007465; -pub const TIOCSCONS: ::c_uint = 0x20007463; -pub const TIOCCONS: ::c_ulong = 0x80047462; -pub const TIOCSCTTY: ::c_uint = 0x20007461; -pub const TIOCEXT: ::c_ulong = 0x80047460; -pub const TIOCSIG: ::c_uint = 0x2000745f; -pub const TIOCDRAIN: ::c_uint = 0x2000745e; -pub const TIOCMSDTRWAIT: ::c_ulong = 0x8004745b; -pub const TIOCMGDTRWAIT: ::c_ulong = 0x4004745a; -pub const TIOCSDRAINWAIT: ::c_ulong = 0x80047457; -pub const TIOCGDRAINWAIT: ::c_ulong = 0x40047456; -pub const TIOCDSIMICROCODE: ::c_uint = 0x20007455; -pub const TIOCPTYGRANT: ::c_uint = 0x20007454; -pub const TIOCPTYGNAME: ::c_uint = 0x40807453; -pub const TIOCPTYUNLK: ::c_uint = 0x20007452; - -pub const SIGTRAP: ::c_int = 5; - -pub const GLOB_APPEND : ::c_int = 0x0001; -pub const GLOB_DOOFFS : ::c_int = 0x0002; -pub const GLOB_ERR : ::c_int = 0x0004; -pub const GLOB_MARK : ::c_int = 0x0008; -pub const GLOB_NOCHECK : ::c_int = 0x0010; -pub const GLOB_NOSORT : ::c_int = 0x0020; -pub const GLOB_NOESCAPE: ::c_int = 0x2000; - -pub const GLOB_NOSPACE : ::c_int = -1; -pub const GLOB_ABORTED : ::c_int = -2; -pub const GLOB_NOMATCH : ::c_int = -3; - -pub const POSIX_MADV_NORMAL: ::c_int = 0; -pub const POSIX_MADV_RANDOM: ::c_int = 1; -pub const POSIX_MADV_SEQUENTIAL: ::c_int = 2; -pub const POSIX_MADV_WILLNEED: ::c_int = 3; -pub const POSIX_MADV_DONTNEED: ::c_int = 4; - -pub const _SC_IOV_MAX: ::c_int = 56; -pub const _SC_GETGR_R_SIZE_MAX: ::c_int = 70; -pub const _SC_GETPW_R_SIZE_MAX: ::c_int = 71; -pub const _SC_LOGIN_NAME_MAX: ::c_int = 73; -pub const _SC_MQ_PRIO_MAX: ::c_int = 75; -pub const _SC_THREAD_ATTR_STACKADDR: ::c_int = 82; -pub const _SC_THREAD_ATTR_STACKSIZE: ::c_int = 83; -pub const 
_SC_THREAD_DESTRUCTOR_ITERATIONS: ::c_int = 85; -pub const _SC_THREAD_KEYS_MAX: ::c_int = 86; -pub const _SC_THREAD_PRIO_INHERIT: ::c_int = 87; -pub const _SC_THREAD_PRIO_PROTECT: ::c_int = 88; -pub const _SC_THREAD_PRIORITY_SCHEDULING: ::c_int = 89; -pub const _SC_THREAD_PROCESS_SHARED: ::c_int = 90; -pub const _SC_THREAD_SAFE_FUNCTIONS: ::c_int = 91; -pub const _SC_THREAD_STACK_MIN: ::c_int = 93; -pub const _SC_THREAD_THREADS_MAX: ::c_int = 94; -pub const _SC_THREADS: ::c_int = 96; -pub const _SC_TTY_NAME_MAX: ::c_int = 101; -pub const _SC_ATEXIT_MAX: ::c_int = 107; -pub const _SC_XOPEN_CRYPT: ::c_int = 108; -pub const _SC_XOPEN_ENH_I18N: ::c_int = 109; -pub const _SC_XOPEN_LEGACY: ::c_int = 110; -pub const _SC_XOPEN_REALTIME: ::c_int = 111; -pub const _SC_XOPEN_REALTIME_THREADS: ::c_int = 112; -pub const _SC_XOPEN_SHM: ::c_int = 113; -pub const _SC_XOPEN_UNIX: ::c_int = 115; -pub const _SC_XOPEN_VERSION: ::c_int = 116; -pub const _SC_XOPEN_XCU_VERSION: ::c_int = 121; - -pub const PTHREAD_CREATE_JOINABLE: ::c_int = 1; -pub const PTHREAD_CREATE_DETACHED: ::c_int = 2; -pub const PTHREAD_STACK_MIN: ::size_t = 8192; - -pub const RLIMIT_CPU: ::c_int = 0; -pub const RLIMIT_FSIZE: ::c_int = 1; -pub const RLIMIT_DATA: ::c_int = 2; -pub const RLIMIT_STACK: ::c_int = 3; -pub const RLIMIT_CORE: ::c_int = 4; -pub const RLIMIT_AS: ::c_int = 5; -pub const RLIMIT_RSS: ::c_int = RLIMIT_AS; -pub const RLIMIT_MEMLOCK: ::c_int = 6; -pub const RLIMIT_NPROC: ::c_int = 7; -pub const RLIMIT_NOFILE: ::c_int = 8; -pub const RLIM_NLIMITS: ::c_int = 9; -pub const _RLIMIT_POSIX_FLAG: ::c_int = 0x1000; - -pub const RLIM_INFINITY: rlim_t = 0x7fff_ffff_ffff_ffff; - -pub const RUSAGE_SELF: ::c_int = 0; -pub const RUSAGE_CHILDREN: ::c_int = -1; - -pub const MADV_NORMAL: ::c_int = 0; -pub const MADV_RANDOM: ::c_int = 1; -pub const MADV_SEQUENTIAL: ::c_int = 2; -pub const MADV_WILLNEED: ::c_int = 3; -pub const MADV_DONTNEED: ::c_int = 4; -pub const MADV_FREE: ::c_int = 5; -pub const 
MADV_ZERO_WIRED_PAGES: ::c_int = 6; -pub const MADV_FREE_REUSABLE: ::c_int = 7; -pub const MADV_FREE_REUSE: ::c_int = 8; -pub const MADV_CAN_REUSE: ::c_int = 9; - -pub const MINCORE_INCORE: ::c_int = 0x1; -pub const MINCORE_REFERENCED: ::c_int = 0x2; -pub const MINCORE_MODIFIED: ::c_int = 0x4; -pub const MINCORE_REFERENCED_OTHER: ::c_int = 0x8; -pub const MINCORE_MODIFIED_OTHER: ::c_int = 0x10; - -pub const AF_UNIX: ::c_int = 1; -pub const AF_INET: ::c_int = 2; -pub const AF_INET6: ::c_int = 30; -pub const SOCK_STREAM: ::c_int = 1; -pub const SOCK_DGRAM: ::c_int = 2; -pub const SOCK_RAW: ::c_int = 3; -pub const SOCK_SEQPACKET: ::c_int = 5; -pub const IPPROTO_TCP: ::c_int = 6; -pub const IPPROTO_IP: ::c_int = 0; -pub const IPPROTO_IPV6: ::c_int = 41; -pub const IP_MULTICAST_TTL: ::c_int = 10; -pub const IP_MULTICAST_LOOP: ::c_int = 11; -pub const IP_TTL: ::c_int = 4; -pub const IP_HDRINCL: ::c_int = 2; -pub const IP_ADD_MEMBERSHIP: ::c_int = 12; -pub const IP_DROP_MEMBERSHIP: ::c_int = 13; -pub const IPV6_JOIN_GROUP: ::c_int = 12; -pub const IPV6_LEAVE_GROUP: ::c_int = 13; - -pub const TCP_NODELAY: ::c_int = 0x01; -pub const TCP_KEEPALIVE: ::c_int = 0x10; -pub const SOL_SOCKET: ::c_int = 0xffff; - -pub const SO_DEBUG: ::c_int = 0x01; -pub const SO_ACCEPTCONN: ::c_int = 0x0002; -pub const SO_REUSEADDR: ::c_int = 0x0004; -pub const SO_KEEPALIVE: ::c_int = 0x0008; -pub const SO_DONTROUTE: ::c_int = 0x0010; -pub const SO_BROADCAST: ::c_int = 0x0020; -pub const SO_USELOOPBACK: ::c_int = 0x0040; -pub const SO_LINGER: ::c_int = 0x0080; -pub const SO_OOBINLINE: ::c_int = 0x0100; -pub const SO_REUSEPORT: ::c_int = 0x0200; -pub const SO_SNDBUF: ::c_int = 0x1001; -pub const SO_RCVBUF: ::c_int = 0x1002; -pub const SO_SNDLOWAT: ::c_int = 0x1003; -pub const SO_RCVLOWAT: ::c_int = 0x1004; -pub const SO_SNDTIMEO: ::c_int = 0x1005; -pub const SO_RCVTIMEO: ::c_int = 0x1006; -pub const SO_ERROR: ::c_int = 0x1007; -pub const SO_TYPE: ::c_int = 0x1008; - -pub const IFF_LOOPBACK: ::c_int 
= 0x8; - -pub const SHUT_RD: ::c_int = 0; -pub const SHUT_WR: ::c_int = 1; -pub const SHUT_RDWR: ::c_int = 2; - -pub const LOCK_SH: ::c_int = 1; -pub const LOCK_EX: ::c_int = 2; -pub const LOCK_NB: ::c_int = 4; -pub const LOCK_UN: ::c_int = 8; - -pub const O_DSYNC: ::c_int = 4194304; -pub const O_SYNC: ::c_int = 128; -pub const O_NONBLOCK: ::c_int = 4; - -pub const MAP_COPY: ::c_int = 0x0002; -pub const MAP_RENAME: ::c_int = 0x0020; -pub const MAP_NORESERVE: ::c_int = 0x0040; -pub const MAP_NOEXTEND: ::c_int = 0x0100; -pub const MAP_HASSEMAPHORE: ::c_int = 0x0200; -pub const MAP_NOCACHE: ::c_int = 0x0400; -pub const MAP_JIT: ::c_int = 0x0800; - -pub const IPPROTO_RAW: ::c_int = 255; - -pub const SO_NREAD: ::c_int = 0x1020; -pub const SO_NKE: ::c_int = 0x1021; -pub const SO_NOSIGPIPE: ::c_int = 0x1022; -pub const SO_NOADDRERR: ::c_int = 0x1023; -pub const SO_NWRITE: ::c_int = 0x1024; -pub const SO_DONTTRUNC: ::c_int = 0x2000; -pub const SO_WANTMORE: ::c_int = 0x4000; -pub const SO_WANTOOBFLAG: ::c_int = 0x8000; - -pub const _SC_ARG_MAX: ::c_int = 1; -pub const _SC_CHILD_MAX: ::c_int = 2; -pub const _SC_CLK_TCK: ::c_int = 3; -pub const _SC_NGROUPS_MAX: ::c_int = 4; -pub const _SC_OPEN_MAX: ::c_int = 5; -pub const _SC_JOB_CONTROL: ::c_int = 6; -pub const _SC_SAVED_IDS: ::c_int = 7; -pub const _SC_VERSION: ::c_int = 8; -pub const _SC_BC_BASE_MAX: ::c_int = 9; -pub const _SC_BC_DIM_MAX: ::c_int = 10; -pub const _SC_BC_SCALE_MAX: ::c_int = 11; -pub const _SC_BC_STRING_MAX: ::c_int = 12; -pub const _SC_COLL_WEIGHTS_MAX: ::c_int = 13; -pub const _SC_EXPR_NEST_MAX: ::c_int = 14; -pub const _SC_LINE_MAX: ::c_int = 15; -pub const _SC_RE_DUP_MAX: ::c_int = 16; -pub const _SC_2_VERSION: ::c_int = 17; -pub const _SC_2_C_BIND: ::c_int = 18; -pub const _SC_2_C_DEV: ::c_int = 19; -pub const _SC_2_CHAR_TERM: ::c_int = 20; -pub const _SC_2_FORT_DEV: ::c_int = 21; -pub const _SC_2_FORT_RUN: ::c_int = 22; -pub const _SC_2_LOCALEDEF: ::c_int = 23; -pub const _SC_2_SW_DEV: ::c_int = 24; 
-pub const _SC_2_UPE: ::c_int = 25; -pub const _SC_STREAM_MAX: ::c_int = 26; -pub const _SC_TZNAME_MAX: ::c_int = 27; -pub const _SC_ASYNCHRONOUS_IO: ::c_int = 28; -pub const _SC_PAGESIZE: ::c_int = 29; -pub const _SC_MEMLOCK: ::c_int = 30; -pub const _SC_MEMLOCK_RANGE: ::c_int = 31; -pub const _SC_MEMORY_PROTECTION: ::c_int = 32; -pub const _SC_MESSAGE_PASSING: ::c_int = 33; -pub const _SC_PRIORITIZED_IO: ::c_int = 34; -pub const _SC_PRIORITY_SCHEDULING: ::c_int = 35; -pub const _SC_REALTIME_SIGNALS: ::c_int = 36; -pub const _SC_SEMAPHORES: ::c_int = 37; -pub const _SC_FSYNC: ::c_int = 38; -pub const _SC_SHARED_MEMORY_OBJECTS: ::c_int = 39; -pub const _SC_SYNCHRONIZED_IO: ::c_int = 40; -pub const _SC_TIMERS: ::c_int = 41; -pub const _SC_AIO_LISTIO_MAX: ::c_int = 42; -pub const _SC_AIO_MAX: ::c_int = 43; -pub const _SC_AIO_PRIO_DELTA_MAX: ::c_int = 44; -pub const _SC_DELAYTIMER_MAX: ::c_int = 45; -pub const _SC_MQ_OPEN_MAX: ::c_int = 46; -pub const _SC_MAPPED_FILES: ::c_int = 47; -pub const _SC_RTSIG_MAX: ::c_int = 48; -pub const _SC_SEM_NSEMS_MAX: ::c_int = 49; -pub const _SC_SEM_VALUE_MAX: ::c_int = 50; -pub const _SC_SIGQUEUE_MAX: ::c_int = 51; -pub const _SC_TIMER_MAX: ::c_int = 52; -pub const _SC_NPROCESSORS_CONF: ::c_int = 57; -pub const _SC_NPROCESSORS_ONLN: ::c_int = 58; -pub const _SC_2_PBS: ::c_int = 59; -pub const _SC_2_PBS_ACCOUNTING: ::c_int = 60; -pub const _SC_2_PBS_CHECKPOINT: ::c_int = 61; -pub const _SC_2_PBS_LOCATE: ::c_int = 62; -pub const _SC_2_PBS_MESSAGE: ::c_int = 63; -pub const _SC_2_PBS_TRACK: ::c_int = 64; -pub const _SC_ADVISORY_INFO: ::c_int = 65; -pub const _SC_BARRIERS: ::c_int = 66; -pub const _SC_CLOCK_SELECTION: ::c_int = 67; -pub const _SC_CPUTIME: ::c_int = 68; -pub const _SC_FILE_LOCKING: ::c_int = 69; -pub const _SC_HOST_NAME_MAX: ::c_int = 72; -pub const _SC_MONOTONIC_CLOCK: ::c_int = 74; -pub const _SC_READER_WRITER_LOCKS: ::c_int = 76; -pub const _SC_REGEXP: ::c_int = 77; -pub const _SC_SHELL: ::c_int = 78; -pub const 
_SC_SPAWN: ::c_int = 79; -pub const _SC_SPIN_LOCKS: ::c_int = 80; -pub const _SC_SPORADIC_SERVER: ::c_int = 81; -pub const _SC_THREAD_CPUTIME: ::c_int = 84; -pub const _SC_THREAD_SPORADIC_SERVER: ::c_int = 92; -pub const _SC_TIMEOUTS: ::c_int = 95; -pub const _SC_TRACE: ::c_int = 97; -pub const _SC_TRACE_EVENT_FILTER: ::c_int = 98; -pub const _SC_TRACE_INHERIT: ::c_int = 99; -pub const _SC_TRACE_LOG: ::c_int = 100; -pub const _SC_TYPED_MEMORY_OBJECTS: ::c_int = 102; -pub const _SC_V6_ILP32_OFF32: ::c_int = 103; -pub const _SC_V6_ILP32_OFFBIG: ::c_int = 104; -pub const _SC_V6_LP64_OFF64: ::c_int = 105; -pub const _SC_V6_LPBIG_OFFBIG: ::c_int = 106; -pub const _SC_IPV6: ::c_int = 118; -pub const _SC_RAW_SOCKETS: ::c_int = 119; -pub const _SC_SYMLOOP_MAX: ::c_int = 120; -pub const _SC_PAGE_SIZE: ::c_int = _SC_PAGESIZE; -pub const _SC_XOPEN_STREAMS: ::c_int = 114; -pub const _SC_XBS5_ILP32_OFF32: ::c_int = 122; -pub const _SC_XBS5_ILP32_OFFBIG: ::c_int = 123; -pub const _SC_XBS5_LP64_OFF64: ::c_int = 124; -pub const _SC_XBS5_LPBIG_OFFBIG: ::c_int = 125; -pub const _SC_SS_REPL_MAX: ::c_int = 126; -pub const _SC_TRACE_EVENT_NAME_MAX: ::c_int = 127; -pub const _SC_TRACE_NAME_MAX: ::c_int = 128; -pub const _SC_TRACE_SYS_MAX: ::c_int = 129; -pub const _SC_TRACE_USER_EVENT_MAX: ::c_int = 130; -pub const _SC_PASS_MAX: ::c_int = 131; - -pub const PTHREAD_MUTEX_NORMAL: ::c_int = 0; -pub const PTHREAD_MUTEX_ERRORCHECK: ::c_int = 1; -pub const PTHREAD_MUTEX_RECURSIVE: ::c_int = 2; -pub const PTHREAD_MUTEX_DEFAULT: ::c_int = PTHREAD_MUTEX_NORMAL; -pub const _PTHREAD_MUTEX_SIG_init: ::c_long = 0x32AAABA7; -pub const _PTHREAD_COND_SIG_init: ::c_long = 0x3CB0B1BB; -pub const _PTHREAD_RWLOCK_SIG_init: ::c_long = 0x2DA8B3B4; -pub const PTHREAD_MUTEX_INITIALIZER: pthread_mutex_t = pthread_mutex_t { - __sig: _PTHREAD_MUTEX_SIG_init, - __opaque: [0; __PTHREAD_MUTEX_SIZE__], -}; -pub const PTHREAD_COND_INITIALIZER: pthread_cond_t = pthread_cond_t { - __sig: _PTHREAD_COND_SIG_init, - 
__opaque: [0; __PTHREAD_COND_SIZE__], -}; -pub const PTHREAD_RWLOCK_INITIALIZER: pthread_rwlock_t = pthread_rwlock_t { - __sig: _PTHREAD_RWLOCK_SIG_init, - __opaque: [0; __PTHREAD_RWLOCK_SIZE__], -}; - -pub const SIGSTKSZ: ::size_t = 131072; - -pub const FD_SETSIZE: usize = 1024; - -pub const ST_NOSUID: ::c_ulong = 2; - -pub const EVFILT_READ: ::int16_t = -1; -pub const EVFILT_WRITE: ::int16_t = -2; -pub const EVFILT_AIO: ::int16_t = -3; -pub const EVFILT_VNODE: ::int16_t = -4; -pub const EVFILT_PROC: ::int16_t = -5; -pub const EVFILT_SIGNAL: ::int16_t = -6; -pub const EVFILT_TIMER: ::int16_t = -7; -pub const EVFILT_MACHPORT: ::int16_t = -8; -pub const EVFILT_FS: ::int16_t = -9; -pub const EVFILT_USER: ::int16_t = -10; -pub const EVFILT_VM: ::int16_t = -12; - -pub const EV_ADD: ::uint16_t = 0x1; -pub const EV_DELETE: ::uint16_t = 0x2; -pub const EV_ENABLE: ::uint16_t = 0x4; -pub const EV_DISABLE: ::uint16_t = 0x8; -pub const EV_ONESHOT: ::uint16_t = 0x10; -pub const EV_CLEAR: ::uint16_t = 0x20; -pub const EV_RECEIPT: ::uint16_t = 0x40; -pub const EV_DISPATCH: ::uint16_t = 0x80; -pub const EV_FLAG0: ::uint16_t = 0x1000; -pub const EV_POLL: ::uint16_t = 0x1000; -pub const EV_FLAG1: ::uint16_t = 0x2000; -pub const EV_OOBAND: ::uint16_t = 0x2000; -pub const EV_ERROR: ::uint16_t = 0x4000; -pub const EV_EOF: ::uint16_t = 0x8000; -pub const EV_SYSFLAGS: ::uint16_t = 0xf000; - -pub const NOTE_TRIGGER: ::uint32_t = 0x01000000; -pub const NOTE_FFNOP: ::uint32_t = 0x00000000; -pub const NOTE_FFAND: ::uint32_t = 0x40000000; -pub const NOTE_FFOR: ::uint32_t = 0x80000000; -pub const NOTE_FFCOPY: ::uint32_t = 0xc0000000; -pub const NOTE_FFCTRLMASK: ::uint32_t = 0xc0000000; -pub const NOTE_FFLAGSMASK: ::uint32_t = 0x00ffffff; -pub const NOTE_LOWAT: ::uint32_t = 0x00000001; -pub const NOTE_DELETE: ::uint32_t = 0x00000001; -pub const NOTE_WRITE: ::uint32_t = 0x00000002; -pub const NOTE_EXTEND: ::uint32_t = 0x00000004; -pub const NOTE_ATTRIB: ::uint32_t = 0x00000008; -pub const 
NOTE_LINK: ::uint32_t = 0x00000010; -pub const NOTE_RENAME: ::uint32_t = 0x00000020; -pub const NOTE_REVOKE: ::uint32_t = 0x00000040; -pub const NOTE_NONE: ::uint32_t = 0x00000080; -pub const NOTE_EXIT: ::uint32_t = 0x80000000; -pub const NOTE_FORK: ::uint32_t = 0x40000000; -pub const NOTE_EXEC: ::uint32_t = 0x20000000; -pub const NOTE_REAP: ::uint32_t = 0x10000000; -pub const NOTE_SIGNAL: ::uint32_t = 0x08000000; -pub const NOTE_EXITSTATUS: ::uint32_t = 0x04000000; -pub const NOTE_EXIT_DETAIL: ::uint32_t = 0x02000000; -pub const NOTE_PDATAMASK: ::uint32_t = 0x000fffff; -pub const NOTE_PCTRLMASK: ::uint32_t = 0xfff00000; -pub const NOTE_EXIT_REPARENTED: ::uint32_t = 0x00080000; -pub const NOTE_EXIT_DETAIL_MASK: ::uint32_t = 0x00070000; -pub const NOTE_EXIT_DECRYPTFAIL: ::uint32_t = 0x00010000; -pub const NOTE_EXIT_MEMORY: ::uint32_t = 0x00020000; -pub const NOTE_EXIT_CSERROR: ::uint32_t = 0x00040000; -pub const NOTE_VM_PRESSURE: ::uint32_t = 0x80000000; -pub const NOTE_VM_PRESSURE_TERMINATE: ::uint32_t = 0x40000000; -pub const NOTE_VM_PRESSURE_SUDDEN_TERMINATE: ::uint32_t = 0x20000000; -pub const NOTE_VM_ERROR: ::uint32_t = 0x10000000; -pub const NOTE_SECONDS: ::uint32_t = 0x00000001; -pub const NOTE_USECONDS: ::uint32_t = 0x00000002; -pub const NOTE_NSECONDS: ::uint32_t = 0x00000004; -pub const NOTE_ABSOLUTE: ::uint32_t = 0x00000008; -pub const NOTE_LEEWAY: ::uint32_t = 0x00000010; -pub const NOTE_CRITICAL: ::uint32_t = 0x00000020; -pub const NOTE_BACKGROUND: ::uint32_t = 0x00000040; -pub const NOTE_TRACK: ::uint32_t = 0x00000001; -pub const NOTE_TRACKERR: ::uint32_t = 0x00000002; -pub const NOTE_CHILD: ::uint32_t = 0x00000004; - -pub const NL0: ::c_int = 0x00000000; -pub const NL1: ::c_int = 0x00000100; -pub const TAB0: ::c_int = 0x00000000; -pub const TAB1: ::c_int = 0x00000400; -pub const TAB2: ::c_int = 0x00000800; -pub const CR0: ::c_int = 0x00000000; -pub const CR1: ::c_int = 0x00001000; -pub const CR2: ::c_int = 0x00002000; -pub const CR3: ::c_int = 
0x00003000; -pub const FF0: ::c_int = 0x00000000; -pub const FF1: ::c_int = 0x00004000; -pub const BS0: ::c_int = 0x00000000; -pub const BS1: ::c_int = 0x00008000; -pub const TAB3: ::c_int = 0x00000004; -pub const VT0: ::c_int = 0x00000000; -pub const VT1: ::c_int = 0x00010000; -pub const IUTF8: ::tcflag_t = 0x00004000; -pub const CRTSCTS: ::tcflag_t = 0x00030000; - -pub const NI_MAXHOST: ::socklen_t = 1025; - -pub const Q_GETQUOTA: ::c_int = 0x300; -pub const Q_SETQUOTA: ::c_int = 0x400; - -pub const RTLD_LOCAL: ::c_int = 0x4; -pub const RTLD_FIRST: ::c_int = 0x100; -pub const RTLD_NODELETE: ::c_int = 0x80; -pub const RTLD_NOLOAD: ::c_int = 0x10; -pub const RTLD_GLOBAL: ::c_int = 0x8; - -pub const _WSTOPPED: ::c_int = 0o177; - -pub const LOG_NETINFO: ::c_int = 12 << 3; -pub const LOG_REMOTEAUTH: ::c_int = 13 << 3; -pub const LOG_INSTALL: ::c_int = 14 << 3; -pub const LOG_RAS: ::c_int = 15 << 3; -pub const LOG_LAUNCHD: ::c_int = 24 << 3; -pub const LOG_NFACILITIES: ::c_int = 25; - -pub const CTLTYPE: ::c_int = 0xf; -pub const CTLTYPE_NODE: ::c_int = 1; -pub const CTLTYPE_INT: ::c_int = 2; -pub const CTLTYPE_STRING: ::c_int = 3; -pub const CTLTYPE_QUAD: ::c_int = 4; -pub const CTLTYPE_OPAQUE: ::c_int = 5; -pub const CTLTYPE_STRUCT: ::c_int = CTLTYPE_OPAQUE; -pub const CTLFLAG_RD: ::c_int = 0x80000000; -pub const CTLFLAG_WR: ::c_int = 0x40000000; -pub const CTLFLAG_RW: ::c_int = CTLFLAG_RD | CTLFLAG_WR; -pub const CTLFLAG_NOLOCK: ::c_int = 0x20000000; -pub const CTLFLAG_ANYBODY: ::c_int = 0x10000000; -pub const CTLFLAG_SECURE: ::c_int = 0x08000000; -pub const CTLFLAG_MASKED: ::c_int = 0x04000000; -pub const CTLFLAG_NOAUTO: ::c_int = 0x02000000; -pub const CTLFLAG_KERN: ::c_int = 0x01000000; -pub const CTLFLAG_LOCKED: ::c_int = 0x00800000; -pub const CTLFLAG_OID2: ::c_int = 0x00400000; -pub const CTL_UNSPEC: ::c_int = 0; -pub const CTL_KERN: ::c_int = 1; -pub const CTL_VM: ::c_int = 2; -pub const CTL_VFS: ::c_int = 3; -pub const CTL_NET: ::c_int = 4; -pub const 
CTL_DEBUG: ::c_int = 5; -pub const CTL_HW: ::c_int = 6; -pub const CTL_MACHDEP: ::c_int = 7; -pub const CTL_USER: ::c_int = 8; -pub const CTL_MAXID: ::c_int = 9; -pub const KERN_OSTYPE: ::c_int = 1; -pub const KERN_OSRELEASE: ::c_int = 2; -pub const KERN_OSREV: ::c_int = 3; -pub const KERN_VERSION: ::c_int = 4; -pub const KERN_MAXVNODES: ::c_int = 5; -pub const KERN_MAXPROC: ::c_int = 6; -pub const KERN_MAXFILES: ::c_int = 7; -pub const KERN_ARGMAX: ::c_int = 8; -pub const KERN_SECURELVL: ::c_int = 9; -pub const KERN_HOSTNAME: ::c_int = 10; -pub const KERN_HOSTID: ::c_int = 11; -pub const KERN_CLOCKRATE: ::c_int = 12; -pub const KERN_VNODE: ::c_int = 13; -pub const KERN_PROC: ::c_int = 14; -pub const KERN_FILE: ::c_int = 15; -pub const KERN_PROF: ::c_int = 16; -pub const KERN_POSIX1: ::c_int = 17; -pub const KERN_NGROUPS: ::c_int = 18; -pub const KERN_JOB_CONTROL: ::c_int = 19; -pub const KERN_SAVED_IDS: ::c_int = 20; -pub const KERN_BOOTTIME: ::c_int = 21; -pub const KERN_NISDOMAINNAME: ::c_int = 22; -pub const KERN_DOMAINNAME: ::c_int = KERN_NISDOMAINNAME; -pub const KERN_MAXPARTITIONS: ::c_int = 23; -pub const KERN_KDEBUG: ::c_int = 24; -pub const KERN_UPDATEINTERVAL: ::c_int = 25; -pub const KERN_OSRELDATE: ::c_int = 26; -pub const KERN_NTP_PLL: ::c_int = 27; -pub const KERN_BOOTFILE: ::c_int = 28; -pub const KERN_MAXFILESPERPROC: ::c_int = 29; -pub const KERN_MAXPROCPERUID: ::c_int = 30; -pub const KERN_DUMPDEV: ::c_int = 31; -pub const KERN_IPC: ::c_int = 32; -pub const KERN_DUMMY: ::c_int = 33; -pub const KERN_PS_STRINGS: ::c_int = 34; -pub const KERN_USRSTACK32: ::c_int = 35; -pub const KERN_LOGSIGEXIT: ::c_int = 36; -pub const KERN_SYMFILE: ::c_int = 37; -pub const KERN_PROCARGS: ::c_int = 38; -pub const KERN_NETBOOT: ::c_int = 40; -pub const KERN_SYSV: ::c_int = 42; -pub const KERN_AFFINITY: ::c_int = 43; -pub const KERN_TRANSLATE: ::c_int = 44; -pub const KERN_CLASSIC: ::c_int = KERN_TRANSLATE; -pub const KERN_EXEC: ::c_int = 45; -pub const 
KERN_CLASSICHANDLER: ::c_int = KERN_EXEC; -pub const KERN_AIOMAX: ::c_int = 46; -pub const KERN_AIOPROCMAX: ::c_int = 47; -pub const KERN_AIOTHREADS: ::c_int = 48; -pub const KERN_COREFILE: ::c_int = 50; -pub const KERN_COREDUMP: ::c_int = 51; -pub const KERN_SUGID_COREDUMP: ::c_int = 52; -pub const KERN_PROCDELAYTERM: ::c_int = 53; -pub const KERN_SHREG_PRIVATIZABLE: ::c_int = 54; -pub const KERN_LOW_PRI_WINDOW: ::c_int = 56; -pub const KERN_LOW_PRI_DELAY: ::c_int = 57; -pub const KERN_POSIX: ::c_int = 58; -pub const KERN_USRSTACK64: ::c_int = 59; -pub const KERN_NX_PROTECTION: ::c_int = 60; -pub const KERN_TFP: ::c_int = 61; -pub const KERN_PROCNAME: ::c_int = 62; -pub const KERN_THALTSTACK: ::c_int = 63; -pub const KERN_SPECULATIVE_READS: ::c_int = 64; -pub const KERN_OSVERSION: ::c_int = 65; -pub const KERN_SAFEBOOT: ::c_int = 66; -pub const KERN_RAGEVNODE: ::c_int = 68; -pub const KERN_TTY: ::c_int = 69; -pub const KERN_CHECKOPENEVT: ::c_int = 70; -pub const KERN_THREADNAME: ::c_int = 71; -pub const KERN_MAXID: ::c_int = 72; -pub const KERN_RAGE_PROC: ::c_int = 1; -pub const KERN_RAGE_THREAD: ::c_int = 2; -pub const KERN_UNRAGE_PROC: ::c_int = 3; -pub const KERN_UNRAGE_THREAD: ::c_int = 4; -pub const KERN_OPENEVT_PROC: ::c_int = 1; -pub const KERN_UNOPENEVT_PROC: ::c_int = 2; -pub const KERN_TFP_POLICY: ::c_int = 1; -pub const KERN_TFP_POLICY_DENY: ::c_int = 0; -pub const KERN_TFP_POLICY_DEFAULT: ::c_int = 2; -pub const KERN_KDEFLAGS: ::c_int = 1; -pub const KERN_KDDFLAGS: ::c_int = 2; -pub const KERN_KDENABLE: ::c_int = 3; -pub const KERN_KDSETBUF: ::c_int = 4; -pub const KERN_KDGETBUF: ::c_int = 5; -pub const KERN_KDSETUP: ::c_int = 6; -pub const KERN_KDREMOVE: ::c_int = 7; -pub const KERN_KDSETREG: ::c_int = 8; -pub const KERN_KDGETREG: ::c_int = 9; -pub const KERN_KDREADTR: ::c_int = 10; -pub const KERN_KDPIDTR: ::c_int = 11; -pub const KERN_KDTHRMAP: ::c_int = 12; -pub const KERN_KDPIDEX: ::c_int = 14; -pub const KERN_KDSETRTCDEC: ::c_int = 15; -pub const 
KERN_KDGETENTROPY: ::c_int = 16; -pub const KERN_KDWRITETR: ::c_int = 17; -pub const KERN_KDWRITEMAP: ::c_int = 18; -pub const KERN_KDENABLE_BG_TRACE: ::c_int = 19; -pub const KERN_KDDISABLE_BG_TRACE: ::c_int = 20; -pub const KERN_KDREADCURTHRMAP: ::c_int = 21; -pub const KERN_KDSET_TYPEFILTER: ::c_int = 22; -pub const KERN_KDBUFWAIT: ::c_int = 23; -pub const KERN_KDCPUMAP: ::c_int = 24; -pub const KERN_PROC_ALL: ::c_int = 0; -pub const KERN_PROC_PID: ::c_int = 1; -pub const KERN_PROC_PGRP: ::c_int = 2; -pub const KERN_PROC_SESSION: ::c_int = 3; -pub const KERN_PROC_TTY: ::c_int = 4; -pub const KERN_PROC_UID: ::c_int = 5; -pub const KERN_PROC_RUID: ::c_int = 6; -pub const KERN_PROC_LCID: ::c_int = 7; -pub const KIPC_MAXSOCKBUF: ::c_int = 1; -pub const KIPC_SOCKBUF_WASTE: ::c_int = 2; -pub const KIPC_SOMAXCONN: ::c_int = 3; -pub const KIPC_MAX_LINKHDR: ::c_int = 4; -pub const KIPC_MAX_PROTOHDR: ::c_int = 5; -pub const KIPC_MAX_HDR: ::c_int = 6; -pub const KIPC_MAX_DATALEN: ::c_int = 7; -pub const KIPC_MBSTAT: ::c_int = 8; -pub const KIPC_NMBCLUSTERS: ::c_int = 9; -pub const KIPC_SOQLIMITCOMPAT: ::c_int = 10; -pub const VM_METER: ::c_int = 1; -pub const VM_LOADAVG: ::c_int = 2; -pub const VM_MACHFACTOR: ::c_int = 4; -pub const VM_SWAPUSAGE: ::c_int = 5; -pub const VM_MAXID: ::c_int = 6; -pub const HW_MACHINE: ::c_int = 1; -pub const HW_MODEL: ::c_int = 2; -pub const HW_NCPU: ::c_int = 3; -pub const HW_BYTEORDER: ::c_int = 4; -pub const HW_PHYSMEM: ::c_int = 5; -pub const HW_USERMEM: ::c_int = 6; -pub const HW_PAGESIZE: ::c_int = 7; -pub const HW_DISKNAMES: ::c_int = 8; -pub const HW_DISKSTATS: ::c_int = 9; -pub const HW_EPOCH: ::c_int = 10; -pub const HW_FLOATINGPT: ::c_int = 11; -pub const HW_MACHINE_ARCH: ::c_int = 12; -pub const HW_VECTORUNIT: ::c_int = 13; -pub const HW_BUS_FREQ: ::c_int = 14; -pub const HW_CPU_FREQ: ::c_int = 15; -pub const HW_CACHELINE: ::c_int = 16; -pub const HW_L1ICACHESIZE: ::c_int = 17; -pub const HW_L1DCACHESIZE: ::c_int = 18; -pub const 
HW_L2SETTINGS: ::c_int = 19; -pub const HW_L2CACHESIZE: ::c_int = 20; -pub const HW_L3SETTINGS: ::c_int = 21; -pub const HW_L3CACHESIZE: ::c_int = 22; -pub const HW_TB_FREQ: ::c_int = 23; -pub const HW_MEMSIZE: ::c_int = 24; -pub const HW_AVAILCPU: ::c_int = 25; -pub const HW_MAXID: ::c_int = 26; -pub const USER_CS_PATH: ::c_int = 1; -pub const USER_BC_BASE_MAX: ::c_int = 2; -pub const USER_BC_DIM_MAX: ::c_int = 3; -pub const USER_BC_SCALE_MAX: ::c_int = 4; -pub const USER_BC_STRING_MAX: ::c_int = 5; -pub const USER_COLL_WEIGHTS_MAX: ::c_int = 6; -pub const USER_EXPR_NEST_MAX: ::c_int = 7; -pub const USER_LINE_MAX: ::c_int = 8; -pub const USER_RE_DUP_MAX: ::c_int = 9; -pub const USER_POSIX2_VERSION: ::c_int = 10; -pub const USER_POSIX2_C_BIND: ::c_int = 11; -pub const USER_POSIX2_C_DEV: ::c_int = 12; -pub const USER_POSIX2_CHAR_TERM: ::c_int = 13; -pub const USER_POSIX2_FORT_DEV: ::c_int = 14; -pub const USER_POSIX2_FORT_RUN: ::c_int = 15; -pub const USER_POSIX2_LOCALEDEF: ::c_int = 16; -pub const USER_POSIX2_SW_DEV: ::c_int = 17; -pub const USER_POSIX2_UPE: ::c_int = 18; -pub const USER_STREAM_MAX: ::c_int = 19; -pub const USER_TZNAME_MAX: ::c_int = 20; -pub const USER_MAXID: ::c_int = 21; -pub const CTL_DEBUG_NAME: ::c_int = 0; -pub const CTL_DEBUG_VALUE: ::c_int = 1; -pub const CTL_DEBUG_MAXID: ::c_int = 20; - -pub const POLLRDNORM: ::c_short = 0x040; -pub const POLLWRNORM: ::c_short = 0x004; -pub const POLLRDBAND: ::c_short = 0x080; -pub const POLLWRBAND: ::c_short = 0x100; - -pub const PRIO_DARWIN_THREAD: ::c_int = 3; -pub const PRIO_DARWIN_PROCESS: ::c_int = 4; -pub const PRIO_DARWIN_BG: ::c_int = 0x1000; -pub const PRIO_DARWIN_NONUI: ::c_int = 0x1001; - -pub const SEM_FAILED: *mut sem_t = -1isize as *mut ::sem_t; - -pub const SIGEV_NONE: ::c_int = 0; -pub const SIGEV_SIGNAL: ::c_int = 1; -pub const SIGEV_THREAD: ::c_int = 3; - -pub const AIO_CANCELED: ::c_int = 2; -pub const AIO_NOTCANCELED: ::c_int = 4; -pub const AIO_ALLDONE: ::c_int = 1; -pub const 
AIO_LISTIO_MAX: ::c_int = 16; -pub const LIO_NOP: ::c_int = 0; -pub const LIO_WRITE: ::c_int = 2; -pub const LIO_READ: ::c_int = 1; -pub const LIO_WAIT: ::c_int = 2; -pub const LIO_NOWAIT: ::c_int = 1; - -f! { - pub fn WSTOPSIG(status: ::c_int) -> ::c_int { - status >> 8 - } - - pub fn _WSTATUS(status: ::c_int) -> ::c_int { - status & 0x7f - } - - pub fn WIFCONTINUED(status: ::c_int) -> bool { - _WSTATUS(status) == _WSTOPPED && WSTOPSIG(status) == 0x13 - } - - pub fn WIFSIGNALED(status: ::c_int) -> bool { - _WSTATUS(status) != _WSTOPPED && _WSTATUS(status) != 0 - } - - pub fn WIFSTOPPED(status: ::c_int) -> bool { - _WSTATUS(status) == _WSTOPPED && WSTOPSIG(status) != 0x13 - } -} - -extern { - pub fn aio_read(aiocbp: *mut aiocb) -> ::c_int; - pub fn aio_write(aiocbp: *mut aiocb) -> ::c_int; - pub fn aio_fsync(op: ::c_int, aiocbp: *mut aiocb) -> ::c_int; - pub fn aio_error(aiocbp: *const aiocb) -> ::c_int; - pub fn aio_return(aiocbp: *mut aiocb) -> ::ssize_t; - #[cfg_attr(all(target_os = "macos", target_arch = "x86"), - link_name = "aio_suspend$UNIX2003")] - pub fn aio_suspend(aiocb_list: *const *const aiocb, nitems: ::c_int, - timeout: *const ::timespec) -> ::c_int; - pub fn aio_cancel(fd: ::c_int, aiocbp: *mut aiocb) -> ::c_int; - pub fn lio_listio(mode: ::c_int, aiocb_list: *const *mut aiocb, - nitems: ::c_int, sevp: *mut sigevent) -> ::c_int; - - pub fn lutimes(file: *const ::c_char, times: *const ::timeval) -> ::c_int; - - pub fn getutxent() -> *mut utmpx; - pub fn getutxid(ut: *const utmpx) -> *mut utmpx; - pub fn getutxline(ut: *const utmpx) -> *mut utmpx; - pub fn pututxline(ut: *const utmpx) -> *mut utmpx; - pub fn setutxent(); - pub fn endutxent(); - pub fn utmpxname(file: *const ::c_char) -> ::c_int; - - pub fn getnameinfo(sa: *const ::sockaddr, - salen: ::socklen_t, - host: *mut ::c_char, - hostlen: ::socklen_t, - serv: *mut ::c_char, - sevlen: ::socklen_t, - flags: ::c_int) -> ::c_int; - pub fn mincore(addr: *const ::c_void, len: ::size_t, - vec: *mut 
::c_char) -> ::c_int; - pub fn sysctlnametomib(name: *const ::c_char, - mibp: *mut ::c_int, - sizep: *mut ::size_t) - -> ::c_int; - #[cfg_attr(all(target_os = "macos", target_arch = "x86"), - link_name = "mprotect$UNIX2003")] - pub fn mprotect(addr: *mut ::c_void, len: ::size_t, prot: ::c_int) - -> ::c_int; - pub fn shm_open(name: *const ::c_char, oflag: ::c_int, ...) -> ::c_int; - pub fn sysctl(name: *mut ::c_int, - namelen: ::c_uint, - oldp: *mut ::c_void, - oldlenp: *mut ::size_t, - newp: *mut ::c_void, - newlen: ::size_t) - -> ::c_int; - pub fn sysctlbyname(name: *const ::c_char, - oldp: *mut ::c_void, - oldlenp: *mut ::size_t, - newp: *mut ::c_void, - newlen: ::size_t) - -> ::c_int; - pub fn mach_absolute_time() -> u64; - pub fn mach_timebase_info(info: *mut ::mach_timebase_info) -> ::c_int; - pub fn pthread_setname_np(name: *const ::c_char) -> ::c_int; - pub fn pthread_get_stackaddr_np(thread: ::pthread_t) -> *mut ::c_void; - pub fn pthread_get_stacksize_np(thread: ::pthread_t) -> ::size_t; - pub fn __error() -> *mut ::c_int; - pub fn backtrace(buf: *mut *mut ::c_void, - sz: ::c_int) -> ::c_int; - #[cfg_attr(target_os = "macos", link_name = "statfs$INODE64")] - pub fn statfs(path: *const ::c_char, buf: *mut statfs) -> ::c_int; - #[cfg_attr(target_os = "macos", link_name = "fstatfs$INODE64")] - pub fn fstatfs(fd: ::c_int, buf: *mut statfs) -> ::c_int; - pub fn kevent(kq: ::c_int, - changelist: *const ::kevent, - nchanges: ::c_int, - eventlist: *mut ::kevent, - nevents: ::c_int, - timeout: *const ::timespec) -> ::c_int; - pub fn kevent64(kq: ::c_int, - changelist: *const ::kevent64_s, - nchanges: ::c_int, - eventlist: *mut ::kevent64_s, - nevents: ::c_int, - flags: ::c_uint, - timeout: *const ::timespec) -> ::c_int; - pub fn mount(src: *const ::c_char, - target: *const ::c_char, - flags: ::c_int, - data: *mut ::c_void) -> ::c_int; - pub fn ptrace(requeset: ::c_int, - pid: ::pid_t, - addr: *mut ::c_char, - data: ::c_int) -> ::c_int; - pub fn quotactl(special: 
*const ::c_char, - cmd: ::c_int, - id: ::c_int, - data: *mut ::c_char) -> ::c_int; - pub fn sethostname(name: *const ::c_char, len: ::c_int) -> ::c_int; - pub fn sendfile(fd: ::c_int, - s: ::c_int, - offset: ::off_t, - len: *mut ::off_t, - hdtr: *mut ::sf_hdtr, - flags: ::c_int) -> ::c_int; - pub fn openpty(amaster: *mut ::c_int, - aslave: *mut ::c_int, - name: *mut ::c_char, - termp: *mut termios, - winp: *mut ::winsize) -> ::c_int; - pub fn forkpty(amaster: *mut ::c_int, - name: *mut ::c_char, - termp: *mut termios, - winp: *mut ::winsize) -> ::pid_t; - pub fn duplocale(base: ::locale_t) -> ::locale_t; - pub fn freelocale(loc: ::locale_t) -> ::c_int; - pub fn localeconv_l(loc: ::locale_t) -> *mut lconv; - pub fn newlocale(mask: ::c_int, - locale: *const ::c_char, - base: ::locale_t) -> ::locale_t; - pub fn uselocale(loc: ::locale_t) -> ::locale_t; - pub fn querylocale(mask: ::c_int, loc: ::locale_t) -> *const ::c_char; - pub fn getpriority(which: ::c_int, who: ::id_t) -> ::c_int; - pub fn setpriority(which: ::c_int, who: ::id_t, prio: ::c_int) -> ::c_int; - - pub fn openat(dirfd: ::c_int, pathname: *const ::c_char, - flags: ::c_int, ...) 
-> ::c_int; - pub fn faccessat(dirfd: ::c_int, pathname: *const ::c_char, - mode: ::c_int, flags: ::c_int) -> ::c_int; - pub fn fchmodat(dirfd: ::c_int, pathname: *const ::c_char, - mode: ::mode_t, flags: ::c_int) -> ::c_int; - pub fn fchownat(dirfd: ::c_int, pathname: *const ::c_char, - owner: ::uid_t, group: ::gid_t, - flags: ::c_int) -> ::c_int; - #[cfg_attr(target_os = "macos", link_name = "fstatat$INODE64")] - pub fn fstatat(dirfd: ::c_int, pathname: *const ::c_char, - buf: *mut stat, flags: ::c_int) -> ::c_int; - pub fn linkat(olddirfd: ::c_int, oldpath: *const ::c_char, - newdirfd: ::c_int, newpath: *const ::c_char, - flags: ::c_int) -> ::c_int; - pub fn mkdirat(dirfd: ::c_int, pathname: *const ::c_char, - mode: ::mode_t) -> ::c_int; - pub fn readlinkat(dirfd: ::c_int, pathname: *const ::c_char, - buf: *mut ::c_char, bufsiz: ::size_t) -> ::ssize_t; - pub fn renameat(olddirfd: ::c_int, oldpath: *const ::c_char, - newdirfd: ::c_int, newpath: *const ::c_char) - -> ::c_int; - pub fn symlinkat(target: *const ::c_char, newdirfd: ::c_int, - linkpath: *const ::c_char) -> ::c_int; - pub fn unlinkat(dirfd: ::c_int, pathname: *const ::c_char, - flags: ::c_int) -> ::c_int; -} - -cfg_if! 
{ - if #[cfg(any(target_arch = "arm", target_arch = "x86"))] { - mod b32; - pub use self::b32::*; - } else if #[cfg(any(target_arch = "x86_64", target_arch = "aarch64"))] { - mod b64; - pub use self::b64::*; - } else { - // Unknown target_arch - } -} diff -Nru cargo-0.17.0/vendor/libc-0.2.18/src/unix/bsd/freebsdlike/dragonfly/mod.rs cargo-0.19.0/vendor/libc-0.2.18/src/unix/bsd/freebsdlike/dragonfly/mod.rs --- cargo-0.17.0/vendor/libc-0.2.18/src/unix/bsd/freebsdlike/dragonfly/mod.rs 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/libc-0.2.18/src/unix/bsd/freebsdlike/dragonfly/mod.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,324 +0,0 @@ -pub type clock_t = u64; -pub type ino_t = u64; -pub type nlink_t = u32; -pub type blksize_t = i64; -pub type clockid_t = ::c_ulong; - -pub type c_long = i64; -pub type c_ulong = u64; -pub type time_t = i64; -pub type suseconds_t = i64; - -pub type uuid_t = ::uuid; - -pub type fsblkcnt_t = u64; -pub type fsfilcnt_t = u64; - -s! { - pub struct aiocb { - pub aio_fildes: ::c_int, - pub aio_offset: ::off_t, - pub aio_buf: *mut ::c_void, - pub aio_nbytes: ::size_t, - pub aio_sigevent: sigevent, - pub aio_lio_opcode: ::c_int, - pub aio_reqprio: ::c_int, - _aio_val: ::c_int, - _aio_err: ::c_int - } - - pub struct dirent { - pub d_fileno: ::ino_t, - pub d_namlen: u16, - pub d_type: u8, - __unused1: u8, - __unused2: u32, - pub d_name: [::c_char; 256], - } - - pub struct uuid { - pub time_low: u32, - pub time_mid: u16, - pub time_hi_and_version: u16, - pub clock_seq_hi_and_reserved: u8, - pub clock_seq_low: u8, - pub node: [u8; 6], - } - - pub struct sigevent { - pub sigev_notify: ::c_int, - pub sigev_signo: ::c_int, //actually a union - #[cfg(target_pointer_width = "64")] - __unused1: ::c_int, - pub sigev_value: ::sigval, - __unused2: *mut ::c_void //actually a function pointer - } - - pub struct statvfs { - pub f_bsize: ::c_ulong, - pub f_frsize: ::c_ulong, - pub f_blocks: ::fsblkcnt_t, - pub f_bfree: ::fsblkcnt_t, - pub f_bavail: 
::fsblkcnt_t, - pub f_files: ::fsfilcnt_t, - pub f_ffree: ::fsfilcnt_t, - pub f_favail: ::fsfilcnt_t, - pub f_fsid: ::c_ulong, - pub f_flag: ::c_ulong, - pub f_namemax: ::c_ulong, - pub f_owner: ::uid_t, - pub f_type: ::c_uint, - pub f_syncreads: u64, - pub f_syncwrites: u64, - pub f_asyncreads: u64, - pub f_asyncwrites: u64, - pub f_fsid_uuid: ::uuid_t, - pub f_uid_uuid: ::uuid_t, - } - - pub struct stat { - pub st_ino: ::ino_t, - pub st_nlink: ::nlink_t, - pub st_dev: ::dev_t, - pub st_mode: ::mode_t, - pub st_padding1: ::uint16_t, - pub st_uid: ::uid_t, - pub st_gid: ::gid_t, - pub st_rdev: ::dev_t, - pub st_atime: ::time_t, - pub st_atime_nsec: ::c_long, - pub st_mtime: ::time_t, - pub st_mtime_nsec: ::c_long, - pub st_ctime: ::time_t, - pub st_ctime_nsec: ::c_long, - pub st_size: ::off_t, - pub st_blocks: ::int64_t, - pub st_blksize: ::uint32_t, - pub st_flags: ::uint32_t, - pub st_gen: ::uint32_t, - pub st_lspare: ::int32_t, - pub st_qspare1: ::int64_t, - pub st_qspare2: ::int64_t, - } -} - -pub const RAND_MAX: ::c_int = 0x7fff_ffff; -pub const PTHREAD_STACK_MIN: ::size_t = 1024; -pub const SIGSTKSZ: ::size_t = 40960; -pub const MADV_INVAL: ::c_int = 10; -pub const O_CLOEXEC: ::c_int = 0x00020000; -pub const F_GETLK: ::c_int = 7; -pub const F_SETLK: ::c_int = 8; -pub const F_SETLKW: ::c_int = 9; -pub const ELAST: ::c_int = 99; -pub const RLIMIT_POSIXLOCKS: ::c_int = 11; -pub const RLIM_NLIMITS: ::rlim_t = 12; - -pub const Q_GETQUOTA: ::c_int = 0x300; -pub const Q_SETQUOTA: ::c_int = 0x400; - -pub const CLOCK_REALTIME: clockid_t = 0; -pub const CLOCK_VIRTUAL: clockid_t = 1; -pub const CLOCK_PROF: clockid_t = 2; -pub const CLOCK_MONOTONIC: clockid_t = 4; -pub const CLOCK_UPTIME: clockid_t = 5; -pub const CLOCK_UPTIME_PRECISE: clockid_t = 7; -pub const CLOCK_UPTIME_FAST: clockid_t = 8; -pub const CLOCK_REALTIME_PRECISE: clockid_t = 9; -pub const CLOCK_REALTIME_FAST: clockid_t = 10; -pub const CLOCK_MONOTONIC_PRECISE: clockid_t = 11; -pub const 
CLOCK_MONOTONIC_FAST: clockid_t = 12; -pub const CLOCK_SECOND: clockid_t = 13; -pub const CLOCK_THREAD_CPUTIME_ID: clockid_t = 14; -pub const CLOCK_PROCESS_CPUTIME_ID: clockid_t = 15; - -pub const CTL_UNSPEC: ::c_int = 0; -pub const CTL_KERN: ::c_int = 1; -pub const CTL_VM: ::c_int = 2; -pub const CTL_VFS: ::c_int = 3; -pub const CTL_NET: ::c_int = 4; -pub const CTL_DEBUG: ::c_int = 5; -pub const CTL_HW: ::c_int = 6; -pub const CTL_MACHDEP: ::c_int = 7; -pub const CTL_USER: ::c_int = 8; -pub const CTL_P1003_1B: ::c_int = 9; -pub const CTL_LWKT: ::c_int = 10; -pub const CTL_MAXID: ::c_int = 11; -pub const KERN_OSTYPE: ::c_int = 1; -pub const KERN_OSRELEASE: ::c_int = 2; -pub const KERN_OSREV: ::c_int = 3; -pub const KERN_VERSION: ::c_int = 4; -pub const KERN_MAXVNODES: ::c_int = 5; -pub const KERN_MAXPROC: ::c_int = 6; -pub const KERN_MAXFILES: ::c_int = 7; -pub const KERN_ARGMAX: ::c_int = 8; -pub const KERN_SECURELVL: ::c_int = 9; -pub const KERN_HOSTNAME: ::c_int = 10; -pub const KERN_HOSTID: ::c_int = 11; -pub const KERN_CLOCKRATE: ::c_int = 12; -pub const KERN_VNODE: ::c_int = 13; -pub const KERN_PROC: ::c_int = 14; -pub const KERN_FILE: ::c_int = 15; -pub const KERN_PROF: ::c_int = 16; -pub const KERN_POSIX1: ::c_int = 17; -pub const KERN_NGROUPS: ::c_int = 18; -pub const KERN_JOB_CONTROL: ::c_int = 19; -pub const KERN_SAVED_IDS: ::c_int = 20; -pub const KERN_BOOTTIME: ::c_int = 21; -pub const KERN_NISDOMAINNAME: ::c_int = 22; -pub const KERN_UPDATEINTERVAL: ::c_int = 23; -pub const KERN_OSRELDATE: ::c_int = 24; -pub const KERN_NTP_PLL: ::c_int = 25; -pub const KERN_BOOTFILE: ::c_int = 26; -pub const KERN_MAXFILESPERPROC: ::c_int = 27; -pub const KERN_MAXPROCPERUID: ::c_int = 28; -pub const KERN_DUMPDEV: ::c_int = 29; -pub const KERN_IPC: ::c_int = 30; -pub const KERN_DUMMY: ::c_int = 31; -pub const KERN_PS_STRINGS: ::c_int = 32; -pub const KERN_USRSTACK: ::c_int = 33; -pub const KERN_LOGSIGEXIT: ::c_int = 34; -pub const KERN_IOV_MAX: ::c_int = 35; -pub const 
KERN_MAXPOSIXLOCKSPERUID: ::c_int = 36; -pub const KERN_MAXID: ::c_int = 37; -pub const KERN_PROC_ALL: ::c_int = 0; -pub const KERN_PROC_PID: ::c_int = 1; -pub const KERN_PROC_PGRP: ::c_int = 2; -pub const KERN_PROC_SESSION: ::c_int = 3; -pub const KERN_PROC_TTY: ::c_int = 4; -pub const KERN_PROC_UID: ::c_int = 5; -pub const KERN_PROC_RUID: ::c_int = 6; -pub const KERN_PROC_ARGS: ::c_int = 7; -pub const KERN_PROC_CWD: ::c_int = 8; -pub const KERN_PROC_PATHNAME: ::c_int = 9; -pub const KERN_PROC_FLAGMASK: ::c_int = 0x10; -pub const KERN_PROC_FLAG_LWP: ::c_int = 0x10; -pub const KIPC_MAXSOCKBUF: ::c_int = 1; -pub const KIPC_SOCKBUF_WASTE: ::c_int = 2; -pub const KIPC_SOMAXCONN: ::c_int = 3; -pub const KIPC_MAX_LINKHDR: ::c_int = 4; -pub const KIPC_MAX_PROTOHDR: ::c_int = 5; -pub const KIPC_MAX_HDR: ::c_int = 6; -pub const KIPC_MAX_DATALEN: ::c_int = 7; -pub const KIPC_MBSTAT: ::c_int = 8; -pub const KIPC_NMBCLUSTERS: ::c_int = 9; -pub const HW_MACHINE: ::c_int = 1; -pub const HW_MODEL: ::c_int = 2; -pub const HW_NCPU: ::c_int = 3; -pub const HW_BYTEORDER: ::c_int = 4; -pub const HW_PHYSMEM: ::c_int = 5; -pub const HW_USERMEM: ::c_int = 6; -pub const HW_PAGESIZE: ::c_int = 7; -pub const HW_DISKNAMES: ::c_int = 8; -pub const HW_DISKSTATS: ::c_int = 9; -pub const HW_FLOATINGPT: ::c_int = 10; -pub const HW_MACHINE_ARCH: ::c_int = 11; -pub const HW_MACHINE_PLATFORM: ::c_int = 12; -pub const HW_SENSORS: ::c_int = 13; -pub const HW_MAXID: ::c_int = 14; -pub const USER_CS_PATH: ::c_int = 1; -pub const USER_BC_BASE_MAX: ::c_int = 2; -pub const USER_BC_DIM_MAX: ::c_int = 3; -pub const USER_BC_SCALE_MAX: ::c_int = 4; -pub const USER_BC_STRING_MAX: ::c_int = 5; -pub const USER_COLL_WEIGHTS_MAX: ::c_int = 6; -pub const USER_EXPR_NEST_MAX: ::c_int = 7; -pub const USER_LINE_MAX: ::c_int = 8; -pub const USER_RE_DUP_MAX: ::c_int = 9; -pub const USER_POSIX2_VERSION: ::c_int = 10; -pub const USER_POSIX2_C_BIND: ::c_int = 11; -pub const USER_POSIX2_C_DEV: ::c_int = 12; -pub const 
USER_POSIX2_CHAR_TERM: ::c_int = 13; -pub const USER_POSIX2_FORT_DEV: ::c_int = 14; -pub const USER_POSIX2_FORT_RUN: ::c_int = 15; -pub const USER_POSIX2_LOCALEDEF: ::c_int = 16; -pub const USER_POSIX2_SW_DEV: ::c_int = 17; -pub const USER_POSIX2_UPE: ::c_int = 18; -pub const USER_STREAM_MAX: ::c_int = 19; -pub const USER_TZNAME_MAX: ::c_int = 20; -pub const USER_MAXID: ::c_int = 21; -pub const CTL_P1003_1B_ASYNCHRONOUS_IO: ::c_int = 1; -pub const CTL_P1003_1B_MAPPED_FILES: ::c_int = 2; -pub const CTL_P1003_1B_MEMLOCK: ::c_int = 3; -pub const CTL_P1003_1B_MEMLOCK_RANGE: ::c_int = 4; -pub const CTL_P1003_1B_MEMORY_PROTECTION: ::c_int = 5; -pub const CTL_P1003_1B_MESSAGE_PASSING: ::c_int = 6; -pub const CTL_P1003_1B_PRIORITIZED_IO: ::c_int = 7; -pub const CTL_P1003_1B_PRIORITY_SCHEDULING: ::c_int = 8; -pub const CTL_P1003_1B_REALTIME_SIGNALS: ::c_int = 9; -pub const CTL_P1003_1B_SEMAPHORES: ::c_int = 10; -pub const CTL_P1003_1B_FSYNC: ::c_int = 11; -pub const CTL_P1003_1B_SHARED_MEMORY_OBJECTS: ::c_int = 12; -pub const CTL_P1003_1B_SYNCHRONIZED_IO: ::c_int = 13; -pub const CTL_P1003_1B_TIMERS: ::c_int = 14; -pub const CTL_P1003_1B_AIO_LISTIO_MAX: ::c_int = 15; -pub const CTL_P1003_1B_AIO_MAX: ::c_int = 16; -pub const CTL_P1003_1B_AIO_PRIO_DELTA_MAX: ::c_int = 17; -pub const CTL_P1003_1B_DELAYTIMER_MAX: ::c_int = 18; -pub const CTL_P1003_1B_UNUSED1: ::c_int = 19; -pub const CTL_P1003_1B_PAGESIZE: ::c_int = 20; -pub const CTL_P1003_1B_RTSIG_MAX: ::c_int = 21; -pub const CTL_P1003_1B_SEM_NSEMS_MAX: ::c_int = 22; -pub const CTL_P1003_1B_SEM_VALUE_MAX: ::c_int = 23; -pub const CTL_P1003_1B_SIGQUEUE_MAX: ::c_int = 24; -pub const CTL_P1003_1B_TIMER_MAX: ::c_int = 25; -pub const CTL_P1003_1B_MAXID: ::c_int = 26; - -pub const EVFILT_READ: ::int16_t = -1; -pub const EVFILT_WRITE: ::int16_t = -2; -pub const EVFILT_AIO: ::int16_t = -3; -pub const EVFILT_VNODE: ::int16_t = -4; -pub const EVFILT_PROC: ::int16_t = -5; -pub const EVFILT_SIGNAL: ::int16_t = -6; -pub const 
EVFILT_TIMER: ::int16_t = -7; -pub const EVFILT_PROCDESC: ::int16_t = -8; -pub const EVFILT_USER: ::int16_t = -9; -pub const EVFILT_FS: ::int16_t = -10; - -pub const EV_ADD: ::uint16_t = 0x1; -pub const EV_DELETE: ::uint16_t = 0x2; -pub const EV_ENABLE: ::uint16_t = 0x4; -pub const EV_DISABLE: ::uint16_t = 0x8; -pub const EV_ONESHOT: ::uint16_t = 0x10; -pub const EV_CLEAR: ::uint16_t = 0x20; -pub const EV_RECEIPT: ::uint16_t = 0x40; -pub const EV_DISPATCH: ::uint16_t = 0x80; -pub const EV_NODATA: ::uint16_t = 0x1000; -pub const EV_FLAG1: ::uint16_t = 0x2000; -pub const EV_ERROR: ::uint16_t = 0x4000; -pub const EV_EOF: ::uint16_t = 0x8000; -pub const EV_SYSFLAGS: ::uint16_t = 0xf000; - -pub const NOTE_TRIGGER: ::uint32_t = 0x01000000; -pub const NOTE_FFNOP: ::uint32_t = 0x00000000; -pub const NOTE_FFAND: ::uint32_t = 0x40000000; -pub const NOTE_FFOR: ::uint32_t = 0x80000000; -pub const NOTE_FFCOPY: ::uint32_t = 0xc0000000; -pub const NOTE_FFCTRLMASK: ::uint32_t = 0xc0000000; -pub const NOTE_FFLAGSMASK: ::uint32_t = 0x00ffffff; -pub const NOTE_LOWAT: ::uint32_t = 0x00000001; -pub const NOTE_OOB: ::uint32_t = 0x00000002; -pub const NOTE_DELETE: ::uint32_t = 0x00000001; -pub const NOTE_WRITE: ::uint32_t = 0x00000002; -pub const NOTE_EXTEND: ::uint32_t = 0x00000004; -pub const NOTE_ATTRIB: ::uint32_t = 0x00000008; -pub const NOTE_LINK: ::uint32_t = 0x00000010; -pub const NOTE_RENAME: ::uint32_t = 0x00000020; -pub const NOTE_REVOKE: ::uint32_t = 0x00000040; -pub const NOTE_EXIT: ::uint32_t = 0x80000000; -pub const NOTE_FORK: ::uint32_t = 0x40000000; -pub const NOTE_EXEC: ::uint32_t = 0x20000000; -pub const NOTE_PDATAMASK: ::uint32_t = 0x000fffff; -pub const NOTE_PCTRLMASK: ::uint32_t = 0xf0000000; -pub const NOTE_TRACK: ::uint32_t = 0x00000001; -pub const NOTE_TRACKERR: ::uint32_t = 0x00000002; -pub const NOTE_CHILD: ::uint32_t = 0x00000004; - -pub const MSG_NOSIGNAL: ::uint32_t = 0x400; - -extern { - pub fn mprotect(addr: *mut ::c_void, len: ::size_t, prot: ::c_int) - 
-> ::c_int; - pub fn clock_getres(clk_id: clockid_t, tp: *mut ::timespec) -> ::c_int; - pub fn clock_gettime(clk_id: clockid_t, tp: *mut ::timespec) -> ::c_int; -} diff -Nru cargo-0.17.0/vendor/libc-0.2.18/src/unix/bsd/freebsdlike/freebsd/mod.rs cargo-0.19.0/vendor/libc-0.2.18/src/unix/bsd/freebsdlike/freebsd/mod.rs --- cargo-0.17.0/vendor/libc-0.2.18/src/unix/bsd/freebsdlike/freebsd/mod.rs 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/libc-0.2.18/src/unix/bsd/freebsdlike/freebsd/mod.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,347 +0,0 @@ -pub type fflags_t = u32; -pub type clock_t = i32; -pub type ino_t = u32; -pub type lwpid_t = i32; -pub type nlink_t = u16; -pub type blksize_t = u32; -pub type clockid_t = ::c_int; - -pub type fsblkcnt_t = ::uint64_t; -pub type fsfilcnt_t = ::uint64_t; - -s! { - pub struct aiocb { - pub aio_fildes: ::c_int, - pub aio_offset: ::off_t, - pub aio_buf: *mut ::c_void, - pub aio_nbytes: ::size_t, - __unused1: [::c_int; 2], - __unused2: *mut ::c_void, - pub aio_lio_opcode: ::c_int, - pub aio_reqprio: ::c_int, - // unused 3 through 5 are the __aiocb_private structure - __unused3: ::c_long, - __unused4: ::c_long, - __unused5: *mut ::c_void, - pub aio_sigevent: sigevent - } - - pub struct dirent { - pub d_fileno: u32, - pub d_reclen: u16, - pub d_type: u8, - pub d_namlen: u8, - pub d_name: [::c_char; 256], - } - - pub struct sigevent { - pub sigev_notify: ::c_int, - pub sigev_signo: ::c_int, - pub sigev_value: ::sigval, - //The rest of the structure is actually a union. We expose only - //sigev_notify_thread_id because it's the most useful union member. 
- pub sigev_notify_thread_id: ::lwpid_t, - #[cfg(target_pointer_width = "64")] - __unused1: ::c_int, - __unused2: [::c_long; 7] - } - - pub struct statvfs { - pub f_bavail: ::fsblkcnt_t, - pub f_bfree: ::fsblkcnt_t, - pub f_blocks: ::fsblkcnt_t, - pub f_favail: ::fsfilcnt_t, - pub f_ffree: ::fsfilcnt_t, - pub f_files: ::fsfilcnt_t, - pub f_bsize: ::c_ulong, - pub f_flag: ::c_ulong, - pub f_frsize: ::c_ulong, - pub f_fsid: ::c_ulong, - pub f_namemax: ::c_ulong, - } -} - -pub const SIGEV_THREAD_ID: ::c_int = 4; - -pub const RAND_MAX: ::c_int = 0x7fff_fffd; -pub const PTHREAD_STACK_MIN: ::size_t = 2048; -pub const SIGSTKSZ: ::size_t = 34816; -pub const SF_NODISKIO: ::c_int = 0x00000001; -pub const SF_MNOWAIT: ::c_int = 0x00000002; -pub const SF_SYNC: ::c_int = 0x00000004; -pub const O_CLOEXEC: ::c_int = 0x00100000; -pub const F_GETLK: ::c_int = 11; -pub const F_SETLK: ::c_int = 12; -pub const F_SETLKW: ::c_int = 13; -pub const ELAST: ::c_int = 96; -pub const RLIMIT_NPTS: ::c_int = 11; -pub const RLIMIT_SWAP: ::c_int = 12; -pub const RLIM_NLIMITS: ::rlim_t = 13; - -pub const Q_GETQUOTA: ::c_int = 0x700; -pub const Q_SETQUOTA: ::c_int = 0x800; - -pub const POSIX_FADV_NORMAL: ::c_int = 0; -pub const POSIX_FADV_RANDOM: ::c_int = 1; -pub const POSIX_FADV_SEQUENTIAL: ::c_int = 2; -pub const POSIX_FADV_WILLNEED: ::c_int = 3; -pub const POSIX_FADV_DONTNEED: ::c_int = 4; -pub const POSIX_FADV_NOREUSE: ::c_int = 5; - -pub const EVFILT_READ: ::int16_t = -1; -pub const EVFILT_WRITE: ::int16_t = -2; -pub const EVFILT_AIO: ::int16_t = -3; -pub const EVFILT_VNODE: ::int16_t = -4; -pub const EVFILT_PROC: ::int16_t = -5; -pub const EVFILT_SIGNAL: ::int16_t = -6; -pub const EVFILT_TIMER: ::int16_t = -7; -pub const EVFILT_FS: ::int16_t = -9; -pub const EVFILT_LIO: ::int16_t = -10; -pub const EVFILT_USER: ::int16_t = -11; - -pub const EV_ADD: ::uint16_t = 0x1; -pub const EV_DELETE: ::uint16_t = 0x2; -pub const EV_ENABLE: ::uint16_t = 0x4; -pub const EV_DISABLE: ::uint16_t = 0x8; -pub 
const EV_ONESHOT: ::uint16_t = 0x10; -pub const EV_CLEAR: ::uint16_t = 0x20; -pub const EV_RECEIPT: ::uint16_t = 0x40; -pub const EV_DISPATCH: ::uint16_t = 0x80; -pub const EV_DROP: ::uint16_t = 0x1000; -pub const EV_FLAG1: ::uint16_t = 0x2000; -pub const EV_ERROR: ::uint16_t = 0x4000; -pub const EV_EOF: ::uint16_t = 0x8000; -pub const EV_SYSFLAGS: ::uint16_t = 0xf000; - -pub const NOTE_TRIGGER: ::uint32_t = 0x01000000; -pub const NOTE_FFNOP: ::uint32_t = 0x00000000; -pub const NOTE_FFAND: ::uint32_t = 0x40000000; -pub const NOTE_FFOR: ::uint32_t = 0x80000000; -pub const NOTE_FFCOPY: ::uint32_t = 0xc0000000; -pub const NOTE_FFCTRLMASK: ::uint32_t = 0xc0000000; -pub const NOTE_FFLAGSMASK: ::uint32_t = 0x00ffffff; -pub const NOTE_LOWAT: ::uint32_t = 0x00000001; -pub const NOTE_DELETE: ::uint32_t = 0x00000001; -pub const NOTE_WRITE: ::uint32_t = 0x00000002; -pub const NOTE_EXTEND: ::uint32_t = 0x00000004; -pub const NOTE_ATTRIB: ::uint32_t = 0x00000008; -pub const NOTE_LINK: ::uint32_t = 0x00000010; -pub const NOTE_RENAME: ::uint32_t = 0x00000020; -pub const NOTE_REVOKE: ::uint32_t = 0x00000040; -pub const NOTE_EXIT: ::uint32_t = 0x80000000; -pub const NOTE_FORK: ::uint32_t = 0x40000000; -pub const NOTE_EXEC: ::uint32_t = 0x20000000; -pub const NOTE_PDATAMASK: ::uint32_t = 0x000fffff; -pub const NOTE_PCTRLMASK: ::uint32_t = 0xf0000000; -pub const NOTE_TRACK: ::uint32_t = 0x00000001; -pub const NOTE_TRACKERR: ::uint32_t = 0x00000002; -pub const NOTE_CHILD: ::uint32_t = 0x00000004; -pub const NOTE_SECONDS: ::uint32_t = 0x00000001; -pub const NOTE_MSECONDS: ::uint32_t = 0x00000002; -pub const NOTE_USECONDS: ::uint32_t = 0x00000004; -pub const NOTE_NSECONDS: ::uint32_t = 0x00000008; - -pub const MADV_PROTECT: ::c_int = 10; -pub const RUSAGE_THREAD: ::c_int = 1; - -pub const CLOCK_REALTIME: clockid_t = 0; -pub const CLOCK_VIRTUAL: clockid_t = 1; -pub const CLOCK_PROF: clockid_t = 2; -pub const CLOCK_MONOTONIC: clockid_t = 4; -pub const CLOCK_UPTIME: clockid_t = 5; -pub 
const CLOCK_UPTIME_PRECISE: clockid_t = 7; -pub const CLOCK_UPTIME_FAST: clockid_t = 8; -pub const CLOCK_REALTIME_PRECISE: clockid_t = 9; -pub const CLOCK_REALTIME_FAST: clockid_t = 10; -pub const CLOCK_MONOTONIC_PRECISE: clockid_t = 11; -pub const CLOCK_MONOTONIC_FAST: clockid_t = 12; -pub const CLOCK_SECOND: clockid_t = 13; -pub const CLOCK_THREAD_CPUTIME_ID: clockid_t = 14; -pub const CLOCK_PROCESS_CPUTIME_ID: clockid_t = 15; - -pub const CTL_UNSPEC: ::c_int = 0; -pub const CTL_KERN: ::c_int = 1; -pub const CTL_VM: ::c_int = 2; -pub const CTL_VFS: ::c_int = 3; -pub const CTL_NET: ::c_int = 4; -pub const CTL_DEBUG: ::c_int = 5; -pub const CTL_HW: ::c_int = 6; -pub const CTL_MACHDEP: ::c_int = 7; -pub const CTL_USER: ::c_int = 8; -pub const CTL_P1003_1B: ::c_int = 9; -pub const KERN_OSTYPE: ::c_int = 1; -pub const KERN_OSRELEASE: ::c_int = 2; -pub const KERN_OSREV: ::c_int = 3; -pub const KERN_VERSION: ::c_int = 4; -pub const KERN_MAXVNODES: ::c_int = 5; -pub const KERN_MAXPROC: ::c_int = 6; -pub const KERN_MAXFILES: ::c_int = 7; -pub const KERN_ARGMAX: ::c_int = 8; -pub const KERN_SECURELVL: ::c_int = 9; -pub const KERN_HOSTNAME: ::c_int = 10; -pub const KERN_HOSTID: ::c_int = 11; -pub const KERN_CLOCKRATE: ::c_int = 12; -pub const KERN_VNODE: ::c_int = 13; -pub const KERN_PROC: ::c_int = 14; -pub const KERN_FILE: ::c_int = 15; -pub const KERN_PROF: ::c_int = 16; -pub const KERN_POSIX1: ::c_int = 17; -pub const KERN_NGROUPS: ::c_int = 18; -pub const KERN_JOB_CONTROL: ::c_int = 19; -pub const KERN_SAVED_IDS: ::c_int = 20; -pub const KERN_BOOTTIME: ::c_int = 21; -pub const KERN_NISDOMAINNAME: ::c_int = 22; -pub const KERN_UPDATEINTERVAL: ::c_int = 23; -pub const KERN_OSRELDATE: ::c_int = 24; -pub const KERN_NTP_PLL: ::c_int = 25; -pub const KERN_BOOTFILE: ::c_int = 26; -pub const KERN_MAXFILESPERPROC: ::c_int = 27; -pub const KERN_MAXPROCPERUID: ::c_int = 28; -pub const KERN_DUMPDEV: ::c_int = 29; -pub const KERN_IPC: ::c_int = 30; -pub const KERN_DUMMY: ::c_int = 
31; -pub const KERN_PS_STRINGS: ::c_int = 32; -pub const KERN_USRSTACK: ::c_int = 33; -pub const KERN_LOGSIGEXIT: ::c_int = 34; -pub const KERN_IOV_MAX: ::c_int = 35; -pub const KERN_HOSTUUID: ::c_int = 36; -pub const KERN_ARND: ::c_int = 37; -pub const KERN_PROC_ALL: ::c_int = 0; -pub const KERN_PROC_PID: ::c_int = 1; -pub const KERN_PROC_PGRP: ::c_int = 2; -pub const KERN_PROC_SESSION: ::c_int = 3; -pub const KERN_PROC_TTY: ::c_int = 4; -pub const KERN_PROC_UID: ::c_int = 5; -pub const KERN_PROC_RUID: ::c_int = 6; -pub const KERN_PROC_ARGS: ::c_int = 7; -pub const KERN_PROC_PROC: ::c_int = 8; -pub const KERN_PROC_SV_NAME: ::c_int = 9; -pub const KERN_PROC_RGID: ::c_int = 10; -pub const KERN_PROC_GID: ::c_int = 11; -pub const KERN_PROC_PATHNAME: ::c_int = 12; -pub const KERN_PROC_OVMMAP: ::c_int = 13; -pub const KERN_PROC_OFILEDESC: ::c_int = 14; -pub const KERN_PROC_KSTACK: ::c_int = 15; -pub const KERN_PROC_INC_THREAD: ::c_int = 0x10; -pub const KERN_PROC_VMMAP: ::c_int = 32; -pub const KERN_PROC_FILEDESC: ::c_int = 33; -pub const KERN_PROC_GROUPS: ::c_int = 34; -pub const KERN_PROC_ENV: ::c_int = 35; -pub const KERN_PROC_AUXV: ::c_int = 36; -pub const KERN_PROC_RLIMIT: ::c_int = 37; -pub const KERN_PROC_PS_STRINGS: ::c_int = 38; -pub const KERN_PROC_UMASK: ::c_int = 39; -pub const KERN_PROC_OSREL: ::c_int = 40; -pub const KERN_PROC_SIGTRAMP: ::c_int = 41; -pub const KIPC_MAXSOCKBUF: ::c_int = 1; -pub const KIPC_SOCKBUF_WASTE: ::c_int = 2; -pub const KIPC_SOMAXCONN: ::c_int = 3; -pub const KIPC_MAX_LINKHDR: ::c_int = 4; -pub const KIPC_MAX_PROTOHDR: ::c_int = 5; -pub const KIPC_MAX_HDR: ::c_int = 6; -pub const KIPC_MAX_DATALEN: ::c_int = 7; -pub const HW_MACHINE: ::c_int = 1; -pub const HW_MODEL: ::c_int = 2; -pub const HW_NCPU: ::c_int = 3; -pub const HW_BYTEORDER: ::c_int = 4; -pub const HW_PHYSMEM: ::c_int = 5; -pub const HW_USERMEM: ::c_int = 6; -pub const HW_PAGESIZE: ::c_int = 7; -pub const HW_DISKNAMES: ::c_int = 8; -pub const HW_DISKSTATS: ::c_int = 9; 
-pub const HW_FLOATINGPT: ::c_int = 10; -pub const HW_MACHINE_ARCH: ::c_int = 11; -pub const HW_REALMEM: ::c_int = 12; -pub const USER_CS_PATH: ::c_int = 1; -pub const USER_BC_BASE_MAX: ::c_int = 2; -pub const USER_BC_DIM_MAX: ::c_int = 3; -pub const USER_BC_SCALE_MAX: ::c_int = 4; -pub const USER_BC_STRING_MAX: ::c_int = 5; -pub const USER_COLL_WEIGHTS_MAX: ::c_int = 6; -pub const USER_EXPR_NEST_MAX: ::c_int = 7; -pub const USER_LINE_MAX: ::c_int = 8; -pub const USER_RE_DUP_MAX: ::c_int = 9; -pub const USER_POSIX2_VERSION: ::c_int = 10; -pub const USER_POSIX2_C_BIND: ::c_int = 11; -pub const USER_POSIX2_C_DEV: ::c_int = 12; -pub const USER_POSIX2_CHAR_TERM: ::c_int = 13; -pub const USER_POSIX2_FORT_DEV: ::c_int = 14; -pub const USER_POSIX2_FORT_RUN: ::c_int = 15; -pub const USER_POSIX2_LOCALEDEF: ::c_int = 16; -pub const USER_POSIX2_SW_DEV: ::c_int = 17; -pub const USER_POSIX2_UPE: ::c_int = 18; -pub const USER_STREAM_MAX: ::c_int = 19; -pub const USER_TZNAME_MAX: ::c_int = 20; -pub const CTL_P1003_1B_ASYNCHRONOUS_IO: ::c_int = 1; -pub const CTL_P1003_1B_MAPPED_FILES: ::c_int = 2; -pub const CTL_P1003_1B_MEMLOCK: ::c_int = 3; -pub const CTL_P1003_1B_MEMLOCK_RANGE: ::c_int = 4; -pub const CTL_P1003_1B_MEMORY_PROTECTION: ::c_int = 5; -pub const CTL_P1003_1B_MESSAGE_PASSING: ::c_int = 6; -pub const CTL_P1003_1B_PRIORITIZED_IO: ::c_int = 7; -pub const CTL_P1003_1B_PRIORITY_SCHEDULING: ::c_int = 8; -pub const CTL_P1003_1B_REALTIME_SIGNALS: ::c_int = 9; -pub const CTL_P1003_1B_SEMAPHORES: ::c_int = 10; -pub const CTL_P1003_1B_FSYNC: ::c_int = 11; -pub const CTL_P1003_1B_SHARED_MEMORY_OBJECTS: ::c_int = 12; -pub const CTL_P1003_1B_SYNCHRONIZED_IO: ::c_int = 13; -pub const CTL_P1003_1B_TIMERS: ::c_int = 14; -pub const CTL_P1003_1B_AIO_LISTIO_MAX: ::c_int = 15; -pub const CTL_P1003_1B_AIO_MAX: ::c_int = 16; -pub const CTL_P1003_1B_AIO_PRIO_DELTA_MAX: ::c_int = 17; -pub const CTL_P1003_1B_DELAYTIMER_MAX: ::c_int = 18; -pub const CTL_P1003_1B_MQ_OPEN_MAX: ::c_int = 19; -pub 
const CTL_P1003_1B_PAGESIZE: ::c_int = 20; -pub const CTL_P1003_1B_RTSIG_MAX: ::c_int = 21; -pub const CTL_P1003_1B_SEM_NSEMS_MAX: ::c_int = 22; -pub const CTL_P1003_1B_SEM_VALUE_MAX: ::c_int = 23; -pub const CTL_P1003_1B_SIGQUEUE_MAX: ::c_int = 24; -pub const CTL_P1003_1B_TIMER_MAX: ::c_int = 25; - -// The *_MAXID constants never should've been used outside of the -// FreeBSD base system. And with the exception of CTL_P1003_1B_MAXID, -// they were all removed in svn r262489. They remain here for backwards -// compatibility only, and are scheduled to be removed in libc 1.0.0. -#[doc(hidden)] -pub const CTL_MAXID: ::c_int = 10; -#[doc(hidden)] -pub const KERN_MAXID: ::c_int = 38; -#[doc(hidden)] -pub const HW_MAXID: ::c_int = 13; -#[doc(hidden)] -pub const USER_MAXID: ::c_int = 21; -#[doc(hidden)] -pub const CTL_P1003_1B_MAXID: ::c_int = 26; - -pub const MSG_NOSIGNAL: ::c_int = 0x20000; - -extern { - pub fn __error() -> *mut ::c_int; - - pub fn mprotect(addr: *const ::c_void, len: ::size_t, prot: ::c_int) - -> ::c_int; - - pub fn clock_getres(clk_id: clockid_t, tp: *mut ::timespec) -> ::c_int; - pub fn clock_gettime(clk_id: clockid_t, tp: *mut ::timespec) -> ::c_int; - - pub fn posix_fallocate(fd: ::c_int, offset: ::off_t, - len: ::off_t) -> ::c_int; - pub fn posix_fadvise(fd: ::c_int, offset: ::off_t, len: ::off_t, - advise: ::c_int) -> ::c_int; - pub fn mkostemp(template: *mut ::c_char, flags: ::c_int) -> ::c_int; - pub fn mkostemps(template: *mut ::c_char, - suffixlen: ::c_int, - flags: ::c_int) -> ::c_int; -} - -cfg_if! 
{ - if #[cfg(target_arch = "x86")] { - mod x86; - pub use self::x86::*; - } else if #[cfg(target_arch = "x86_64")] { - mod x86_64; - pub use self::x86_64::*; - } else { - // Unknown target_arch - } -} diff -Nru cargo-0.17.0/vendor/libc-0.2.18/src/unix/bsd/freebsdlike/freebsd/x86_64.rs cargo-0.19.0/vendor/libc-0.2.18/src/unix/bsd/freebsdlike/freebsd/x86_64.rs --- cargo-0.17.0/vendor/libc-0.2.18/src/unix/bsd/freebsdlike/freebsd/x86_64.rs 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/libc-0.2.18/src/unix/bsd/freebsdlike/freebsd/x86_64.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,30 +0,0 @@ -pub type c_long = i64; -pub type c_ulong = u64; -pub type time_t = i64; -pub type suseconds_t = i64; - -s! { - pub struct stat { - pub st_dev: ::dev_t, - pub st_ino: ::ino_t, - pub st_mode: ::mode_t, - pub st_nlink: ::nlink_t, - pub st_uid: ::uid_t, - pub st_gid: ::gid_t, - pub st_rdev: ::dev_t, - pub st_atime: ::time_t, - pub st_atime_nsec: ::c_long, - pub st_mtime: ::time_t, - pub st_mtime_nsec: ::c_long, - pub st_ctime: ::time_t, - pub st_ctime_nsec: ::c_long, - pub st_size: ::off_t, - pub st_blocks: ::blkcnt_t, - pub st_blksize: ::blksize_t, - pub st_flags: ::fflags_t, - pub st_gen: ::uint32_t, - pub st_lspare: ::int32_t, - pub st_birthtime: ::time_t, - pub st_birthtime_nsec: ::c_long, - } -} diff -Nru cargo-0.17.0/vendor/libc-0.2.18/src/unix/bsd/freebsdlike/freebsd/x86.rs cargo-0.19.0/vendor/libc-0.2.18/src/unix/bsd/freebsdlike/freebsd/x86.rs --- cargo-0.17.0/vendor/libc-0.2.18/src/unix/bsd/freebsdlike/freebsd/x86.rs 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/libc-0.2.18/src/unix/bsd/freebsdlike/freebsd/x86.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,31 +0,0 @@ -pub type c_long = i32; -pub type c_ulong = u32; -pub type time_t = i32; -pub type suseconds_t = i32; - -s! 
{ - pub struct stat { - pub st_dev: ::dev_t, - pub st_ino: ::ino_t, - pub st_mode: ::mode_t, - pub st_nlink: ::nlink_t, - pub st_uid: ::uid_t, - pub st_gid: ::gid_t, - pub st_rdev: ::dev_t, - pub st_atime: ::time_t, - pub st_atime_nsec: ::c_long, - pub st_mtime: ::time_t, - pub st_mtime_nsec: ::c_long, - pub st_ctime: ::time_t, - pub st_ctime_nsec: ::c_long, - pub st_size: ::off_t, - pub st_blocks: ::blkcnt_t, - pub st_blksize: ::blksize_t, - pub st_flags: ::fflags_t, - pub st_gen: ::uint32_t, - pub st_lspare: ::int32_t, - pub st_birthtime: ::time_t, - pub st_birthtime_nsec: ::c_long, - __unused: [u8; 8], - } -} diff -Nru cargo-0.17.0/vendor/libc-0.2.18/src/unix/bsd/freebsdlike/mod.rs cargo-0.19.0/vendor/libc-0.2.18/src/unix/bsd/freebsdlike/mod.rs --- cargo-0.17.0/vendor/libc-0.2.18/src/unix/bsd/freebsdlike/mod.rs 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/libc-0.2.18/src/unix/bsd/freebsdlike/mod.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,909 +0,0 @@ -pub type dev_t = u32; -pub type mode_t = u16; -pub type pthread_attr_t = *mut ::c_void; -pub type rlim_t = i64; -pub type pthread_mutex_t = *mut ::c_void; -pub type pthread_mutexattr_t = *mut ::c_void; -pub type pthread_cond_t = *mut ::c_void; -pub type pthread_condattr_t = *mut ::c_void; -pub type pthread_rwlock_t = *mut ::c_void; -pub type pthread_key_t = ::c_int; -pub type tcflag_t = ::c_uint; -pub type speed_t = ::c_uint; -pub type nl_item = ::c_int; -pub type id_t = i64; -pub type sem_t = _sem; - -pub enum timezone {} - -s! 
{ - pub struct utmpx { - pub ut_type: ::c_short, - pub ut_tv: ::timeval, - pub ut_id: [::c_char; 8], - pub ut_pid: ::pid_t, - pub ut_user: [::c_char; 32], - pub ut_line: [::c_char; 16], - pub ut_host: [::c_char; 128], - pub __ut_spare: [::c_char; 64], - } - - pub struct glob_t { - pub gl_pathc: ::size_t, - pub gl_matchc: ::size_t, - pub gl_offs: ::size_t, - pub gl_flags: ::c_int, - pub gl_pathv: *mut *mut ::c_char, - __unused3: *mut ::c_void, - __unused4: *mut ::c_void, - __unused5: *mut ::c_void, - __unused6: *mut ::c_void, - __unused7: *mut ::c_void, - __unused8: *mut ::c_void, - } - - pub struct kevent { - pub ident: ::uintptr_t, - pub filter: ::c_short, - pub flags: ::c_ushort, - pub fflags: ::c_uint, - pub data: ::intptr_t, - pub udata: *mut ::c_void, - } - - pub struct sockaddr_storage { - pub ss_len: u8, - pub ss_family: ::sa_family_t, - __ss_pad1: [u8; 6], - __ss_align: i64, - __ss_pad2: [u8; 112], - } - - pub struct addrinfo { - pub ai_flags: ::c_int, - pub ai_family: ::c_int, - pub ai_socktype: ::c_int, - pub ai_protocol: ::c_int, - pub ai_addrlen: ::socklen_t, - pub ai_canonname: *mut ::c_char, - pub ai_addr: *mut ::sockaddr, - pub ai_next: *mut addrinfo, - } - - pub struct sigset_t { - bits: [u32; 4], - } - - pub struct siginfo_t { - pub si_signo: ::c_int, - pub si_errno: ::c_int, - pub si_code: ::c_int, - pub si_pid: ::pid_t, - pub si_uid: ::uid_t, - pub si_status: ::c_int, - pub si_addr: *mut ::c_void, - _pad: [::c_int; 12], - } - - pub struct sigaction { - pub sa_sigaction: ::sighandler_t, - pub sa_flags: ::c_int, - pub sa_mask: sigset_t, - } - - pub struct stack_t { - // In FreeBSD 11 and later, ss_sp is actually a void* - pub ss_sp: *mut ::c_char, - pub ss_size: ::size_t, - pub ss_flags: ::c_int, - } - - pub struct sched_param { - pub sched_priority: ::c_int, - } - - pub struct Dl_info { - pub dli_fname: *const ::c_char, - pub dli_fbase: *mut ::c_void, - pub dli_sname: *const ::c_char, - pub dli_saddr: *mut ::c_void, - } - - pub struct sockaddr_in 
{ - pub sin_len: u8, - pub sin_family: ::sa_family_t, - pub sin_port: ::in_port_t, - pub sin_addr: ::in_addr, - pub sin_zero: [::c_char; 8], - } - - pub struct termios { - pub c_iflag: ::tcflag_t, - pub c_oflag: ::tcflag_t, - pub c_cflag: ::tcflag_t, - pub c_lflag: ::tcflag_t, - pub c_cc: [::cc_t; ::NCCS], - pub c_ispeed: ::speed_t, - pub c_ospeed: ::speed_t, - } - - pub struct flock { - pub l_start: ::off_t, - pub l_len: ::off_t, - pub l_pid: ::pid_t, - pub l_type: ::c_short, - pub l_whence: ::c_short, - #[cfg(not(target_os = "dragonfly"))] - pub l_sysid: ::c_int, - } - - pub struct sf_hdtr { - pub headers: *mut ::iovec, - pub hdr_cnt: ::c_int, - pub trailers: *mut ::iovec, - pub trl_cnt: ::c_int, - } - - pub struct lconv { - pub decimal_point: *mut ::c_char, - pub thousands_sep: *mut ::c_char, - pub grouping: *mut ::c_char, - pub int_curr_symbol: *mut ::c_char, - pub currency_symbol: *mut ::c_char, - pub mon_decimal_point: *mut ::c_char, - pub mon_thousands_sep: *mut ::c_char, - pub mon_grouping: *mut ::c_char, - pub positive_sign: *mut ::c_char, - pub negative_sign: *mut ::c_char, - pub int_frac_digits: ::c_char, - pub frac_digits: ::c_char, - pub p_cs_precedes: ::c_char, - pub p_sep_by_space: ::c_char, - pub n_cs_precedes: ::c_char, - pub n_sep_by_space: ::c_char, - pub p_sign_posn: ::c_char, - pub n_sign_posn: ::c_char, - pub int_p_cs_precedes: ::c_char, - pub int_n_cs_precedes: ::c_char, - pub int_p_sep_by_space: ::c_char, - pub int_n_sep_by_space: ::c_char, - pub int_p_sign_posn: ::c_char, - pub int_n_sign_posn: ::c_char, - } - - // internal structure has changed over time - pub struct _sem { - data: [u32; 4], - } -} - -pub const AIO_LISTIO_MAX: ::c_int = 16; -pub const AIO_CANCELED: ::c_int = 1; -pub const AIO_NOTCANCELED: ::c_int = 2; -pub const AIO_ALLDONE: ::c_int = 3; -pub const LIO_NOP: ::c_int = 0; -pub const LIO_WRITE: ::c_int = 1; -pub const LIO_READ: ::c_int = 2; -pub const LIO_WAIT: ::c_int = 1; -pub const LIO_NOWAIT: ::c_int = 0; - -pub const 
SIGEV_NONE: ::c_int = 0; -pub const SIGEV_SIGNAL: ::c_int = 1; -pub const SIGEV_THREAD: ::c_int = 2; -pub const SIGEV_KEVENT: ::c_int = 3; - -pub const EMPTY: ::c_short = 0; -pub const BOOT_TIME: ::c_short = 1; -pub const OLD_TIME: ::c_short = 2; -pub const NEW_TIME: ::c_short = 3; -pub const USER_PROCESS: ::c_short = 4; -pub const INIT_PROCESS: ::c_short = 5; -pub const LOGIN_PROCESS: ::c_short = 6; -pub const DEAD_PROCESS: ::c_short = 7; -pub const SHUTDOWN_TIME: ::c_short = 8; - -pub const LC_COLLATE_MASK: ::c_int = (1 << 0); -pub const LC_CTYPE_MASK: ::c_int = (1 << 1); -pub const LC_MESSAGES_MASK: ::c_int = (1 << 2); -pub const LC_MONETARY_MASK: ::c_int = (1 << 3); -pub const LC_NUMERIC_MASK: ::c_int = (1 << 4); -pub const LC_TIME_MASK: ::c_int = (1 << 5); -pub const LC_ALL_MASK: ::c_int = LC_COLLATE_MASK - | LC_CTYPE_MASK - | LC_MESSAGES_MASK - | LC_MONETARY_MASK - | LC_NUMERIC_MASK - | LC_TIME_MASK; - -pub const CODESET: ::nl_item = 0; -pub const D_T_FMT: ::nl_item = 1; -pub const D_FMT: ::nl_item = 2; -pub const T_FMT: ::nl_item = 3; -pub const T_FMT_AMPM: ::nl_item = 4; -pub const AM_STR: ::nl_item = 5; -pub const PM_STR: ::nl_item = 6; - -pub const DAY_1: ::nl_item = 7; -pub const DAY_2: ::nl_item = 8; -pub const DAY_3: ::nl_item = 9; -pub const DAY_4: ::nl_item = 10; -pub const DAY_5: ::nl_item = 11; -pub const DAY_6: ::nl_item = 12; -pub const DAY_7: ::nl_item = 13; - -pub const ABDAY_1: ::nl_item = 14; -pub const ABDAY_2: ::nl_item = 15; -pub const ABDAY_3: ::nl_item = 16; -pub const ABDAY_4: ::nl_item = 17; -pub const ABDAY_5: ::nl_item = 18; -pub const ABDAY_6: ::nl_item = 19; -pub const ABDAY_7: ::nl_item = 20; - -pub const MON_1: ::nl_item = 21; -pub const MON_2: ::nl_item = 22; -pub const MON_3: ::nl_item = 23; -pub const MON_4: ::nl_item = 24; -pub const MON_5: ::nl_item = 25; -pub const MON_6: ::nl_item = 26; -pub const MON_7: ::nl_item = 27; -pub const MON_8: ::nl_item = 28; -pub const MON_9: ::nl_item = 29; -pub const MON_10: ::nl_item = 30; 
-pub const MON_11: ::nl_item = 31; -pub const MON_12: ::nl_item = 32; - -pub const ABMON_1: ::nl_item = 33; -pub const ABMON_2: ::nl_item = 34; -pub const ABMON_3: ::nl_item = 35; -pub const ABMON_4: ::nl_item = 36; -pub const ABMON_5: ::nl_item = 37; -pub const ABMON_6: ::nl_item = 38; -pub const ABMON_7: ::nl_item = 39; -pub const ABMON_8: ::nl_item = 40; -pub const ABMON_9: ::nl_item = 41; -pub const ABMON_10: ::nl_item = 42; -pub const ABMON_11: ::nl_item = 43; -pub const ABMON_12: ::nl_item = 44; - -pub const ERA: ::nl_item = 45; -pub const ERA_D_FMT: ::nl_item = 46; -pub const ERA_D_T_FMT: ::nl_item = 47; -pub const ERA_T_FMT: ::nl_item = 48; -pub const ALT_DIGITS: ::nl_item = 49; - -pub const RADIXCHAR: ::nl_item = 50; -pub const THOUSEP: ::nl_item = 51; - -pub const YESEXPR: ::nl_item = 52; -pub const NOEXPR: ::nl_item = 53; - -pub const YESSTR: ::nl_item = 54; -pub const NOSTR: ::nl_item = 55; - -pub const CRNCYSTR: ::nl_item = 56; - -pub const D_MD_ORDER: ::nl_item = 57; - -pub const ALTMON_1: ::nl_item = 58; -pub const ALTMON_2: ::nl_item = 59; -pub const ALTMON_3: ::nl_item = 60; -pub const ALTMON_4: ::nl_item = 61; -pub const ALTMON_5: ::nl_item = 62; -pub const ALTMON_6: ::nl_item = 63; -pub const ALTMON_7: ::nl_item = 64; -pub const ALTMON_8: ::nl_item = 65; -pub const ALTMON_9: ::nl_item = 66; -pub const ALTMON_10: ::nl_item = 67; -pub const ALTMON_11: ::nl_item = 68; -pub const ALTMON_12: ::nl_item = 69; - -pub const EXIT_FAILURE: ::c_int = 1; -pub const EXIT_SUCCESS: ::c_int = 0; -pub const EOF: ::c_int = -1; -pub const SEEK_SET: ::c_int = 0; -pub const SEEK_CUR: ::c_int = 1; -pub const SEEK_END: ::c_int = 2; -pub const _IOFBF: ::c_int = 0; -pub const _IONBF: ::c_int = 2; -pub const _IOLBF: ::c_int = 1; -pub const BUFSIZ: ::c_uint = 1024; -pub const FOPEN_MAX: ::c_uint = 20; -pub const FILENAME_MAX: ::c_uint = 1024; -pub const L_tmpnam: ::c_uint = 1024; -pub const TMP_MAX: ::c_uint = 308915776; - -pub const O_RDONLY: ::c_int = 0; -pub const 
O_WRONLY: ::c_int = 1; -pub const O_RDWR: ::c_int = 2; -pub const O_ACCMODE: ::c_int = 3; -pub const O_APPEND: ::c_int = 8; -pub const O_CREAT: ::c_int = 512; -pub const O_EXCL: ::c_int = 2048; -pub const O_NOCTTY: ::c_int = 32768; -pub const O_TRUNC: ::c_int = 1024; -pub const S_IFIFO: mode_t = 4096; -pub const S_IFCHR: mode_t = 8192; -pub const S_IFBLK: mode_t = 24576; -pub const S_IFDIR: mode_t = 16384; -pub const S_IFREG: mode_t = 32768; -pub const S_IFLNK: mode_t = 40960; -pub const S_IFSOCK: mode_t = 49152; -pub const S_IFMT: mode_t = 61440; -pub const S_IEXEC: mode_t = 64; -pub const S_IWRITE: mode_t = 128; -pub const S_IREAD: mode_t = 256; -pub const S_IRWXU: mode_t = 448; -pub const S_IXUSR: mode_t = 64; -pub const S_IWUSR: mode_t = 128; -pub const S_IRUSR: mode_t = 256; -pub const S_IRWXG: mode_t = 56; -pub const S_IXGRP: mode_t = 8; -pub const S_IWGRP: mode_t = 16; -pub const S_IRGRP: mode_t = 32; -pub const S_IRWXO: mode_t = 7; -pub const S_IXOTH: mode_t = 1; -pub const S_IWOTH: mode_t = 2; -pub const S_IROTH: mode_t = 4; -pub const F_OK: ::c_int = 0; -pub const R_OK: ::c_int = 4; -pub const W_OK: ::c_int = 2; -pub const X_OK: ::c_int = 1; -pub const STDIN_FILENO: ::c_int = 0; -pub const STDOUT_FILENO: ::c_int = 1; -pub const STDERR_FILENO: ::c_int = 2; -pub const F_LOCK: ::c_int = 1; -pub const F_TEST: ::c_int = 3; -pub const F_TLOCK: ::c_int = 2; -pub const F_ULOCK: ::c_int = 0; -pub const F_DUPFD_CLOEXEC: ::c_int = 17; -pub const SIGHUP: ::c_int = 1; -pub const SIGINT: ::c_int = 2; -pub const SIGQUIT: ::c_int = 3; -pub const SIGILL: ::c_int = 4; -pub const SIGABRT: ::c_int = 6; -pub const SIGEMT: ::c_int = 7; -pub const SIGFPE: ::c_int = 8; -pub const SIGKILL: ::c_int = 9; -pub const SIGSEGV: ::c_int = 11; -pub const SIGPIPE: ::c_int = 13; -pub const SIGALRM: ::c_int = 14; -pub const SIGTERM: ::c_int = 15; - -pub const PROT_NONE: ::c_int = 0; -pub const PROT_READ: ::c_int = 1; -pub const PROT_WRITE: ::c_int = 2; -pub const PROT_EXEC: ::c_int = 4; - 
-pub const MAP_FILE: ::c_int = 0x0000; -pub const MAP_SHARED: ::c_int = 0x0001; -pub const MAP_PRIVATE: ::c_int = 0x0002; -pub const MAP_FIXED: ::c_int = 0x0010; -pub const MAP_ANON: ::c_int = 0x1000; - -pub const MAP_FAILED: *mut ::c_void = !0 as *mut ::c_void; - -pub const MCL_CURRENT: ::c_int = 0x0001; -pub const MCL_FUTURE: ::c_int = 0x0002; - -pub const MS_SYNC: ::c_int = 0x0000; -pub const MS_ASYNC: ::c_int = 0x0001; -pub const MS_INVALIDATE: ::c_int = 0x0002; - -pub const EPERM: ::c_int = 1; -pub const ENOENT: ::c_int = 2; -pub const ESRCH: ::c_int = 3; -pub const EINTR: ::c_int = 4; -pub const EIO: ::c_int = 5; -pub const ENXIO: ::c_int = 6; -pub const E2BIG: ::c_int = 7; -pub const ENOEXEC: ::c_int = 8; -pub const EBADF: ::c_int = 9; -pub const ECHILD: ::c_int = 10; -pub const EDEADLK: ::c_int = 11; -pub const ENOMEM: ::c_int = 12; -pub const EACCES: ::c_int = 13; -pub const EFAULT: ::c_int = 14; -pub const ENOTBLK: ::c_int = 15; -pub const EBUSY: ::c_int = 16; -pub const EEXIST: ::c_int = 17; -pub const EXDEV: ::c_int = 18; -pub const ENODEV: ::c_int = 19; -pub const ENOTDIR: ::c_int = 20; -pub const EISDIR: ::c_int = 21; -pub const EINVAL: ::c_int = 22; -pub const ENFILE: ::c_int = 23; -pub const EMFILE: ::c_int = 24; -pub const ENOTTY: ::c_int = 25; -pub const ETXTBSY: ::c_int = 26; -pub const EFBIG: ::c_int = 27; -pub const ENOSPC: ::c_int = 28; -pub const ESPIPE: ::c_int = 29; -pub const EROFS: ::c_int = 30; -pub const EMLINK: ::c_int = 31; -pub const EPIPE: ::c_int = 32; -pub const EDOM: ::c_int = 33; -pub const ERANGE: ::c_int = 34; -pub const EAGAIN: ::c_int = 35; -pub const EWOULDBLOCK: ::c_int = 35; -pub const EINPROGRESS: ::c_int = 36; -pub const EALREADY: ::c_int = 37; -pub const ENOTSOCK: ::c_int = 38; -pub const EDESTADDRREQ: ::c_int = 39; -pub const EMSGSIZE: ::c_int = 40; -pub const EPROTOTYPE: ::c_int = 41; -pub const ENOPROTOOPT: ::c_int = 42; -pub const EPROTONOSUPPORT: ::c_int = 43; -pub const ESOCKTNOSUPPORT: ::c_int = 44; -pub const 
EOPNOTSUPP: ::c_int = 45; -pub const ENOTSUP: ::c_int = EOPNOTSUPP; -pub const EPFNOSUPPORT: ::c_int = 46; -pub const EAFNOSUPPORT: ::c_int = 47; -pub const EADDRINUSE: ::c_int = 48; -pub const EADDRNOTAVAIL: ::c_int = 49; -pub const ENETDOWN: ::c_int = 50; -pub const ENETUNREACH: ::c_int = 51; -pub const ENETRESET: ::c_int = 52; -pub const ECONNABORTED: ::c_int = 53; -pub const ECONNRESET: ::c_int = 54; -pub const ENOBUFS: ::c_int = 55; -pub const EISCONN: ::c_int = 56; -pub const ENOTCONN: ::c_int = 57; -pub const ESHUTDOWN: ::c_int = 58; -pub const ETOOMANYREFS: ::c_int = 59; -pub const ETIMEDOUT: ::c_int = 60; -pub const ECONNREFUSED: ::c_int = 61; -pub const ELOOP: ::c_int = 62; -pub const ENAMETOOLONG: ::c_int = 63; -pub const EHOSTDOWN: ::c_int = 64; -pub const EHOSTUNREACH: ::c_int = 65; -pub const ENOTEMPTY: ::c_int = 66; -pub const EPROCLIM: ::c_int = 67; -pub const EUSERS: ::c_int = 68; -pub const EDQUOT: ::c_int = 69; -pub const ESTALE: ::c_int = 70; -pub const EREMOTE: ::c_int = 71; -pub const EBADRPC: ::c_int = 72; -pub const ERPCMISMATCH: ::c_int = 73; -pub const EPROGUNAVAIL: ::c_int = 74; -pub const EPROGMISMATCH: ::c_int = 75; -pub const EPROCUNAVAIL: ::c_int = 76; -pub const ENOLCK: ::c_int = 77; -pub const ENOSYS: ::c_int = 78; -pub const EFTYPE: ::c_int = 79; -pub const EAUTH: ::c_int = 80; -pub const ENEEDAUTH: ::c_int = 81; -pub const EIDRM: ::c_int = 82; -pub const ENOMSG: ::c_int = 83; -pub const EOVERFLOW: ::c_int = 84; -pub const ECANCELED: ::c_int = 85; -pub const EILSEQ: ::c_int = 86; -pub const ENOATTR: ::c_int = 87; -pub const EDOOFUS: ::c_int = 88; -pub const EBADMSG: ::c_int = 89; -pub const EMULTIHOP: ::c_int = 90; -pub const ENOLINK: ::c_int = 91; -pub const EPROTO: ::c_int = 92; - -pub const EAI_SYSTEM: ::c_int = 11; - -pub const F_DUPFD: ::c_int = 0; -pub const F_GETFD: ::c_int = 1; -pub const F_SETFD: ::c_int = 2; -pub const F_GETFL: ::c_int = 3; -pub const F_SETFL: ::c_int = 4; - -pub const SIGTRAP: ::c_int = 5; - -pub const 
GLOB_APPEND : ::c_int = 0x0001; -pub const GLOB_DOOFFS : ::c_int = 0x0002; -pub const GLOB_ERR : ::c_int = 0x0004; -pub const GLOB_MARK : ::c_int = 0x0008; -pub const GLOB_NOCHECK : ::c_int = 0x0010; -pub const GLOB_NOSORT : ::c_int = 0x0020; -pub const GLOB_NOESCAPE: ::c_int = 0x2000; - -pub const GLOB_NOSPACE : ::c_int = -1; -pub const GLOB_ABORTED : ::c_int = -2; -pub const GLOB_NOMATCH : ::c_int = -3; - -pub const POSIX_MADV_NORMAL: ::c_int = 0; -pub const POSIX_MADV_RANDOM: ::c_int = 1; -pub const POSIX_MADV_SEQUENTIAL: ::c_int = 2; -pub const POSIX_MADV_WILLNEED: ::c_int = 3; -pub const POSIX_MADV_DONTNEED: ::c_int = 4; - -pub const _SC_IOV_MAX: ::c_int = 56; -pub const _SC_GETGR_R_SIZE_MAX: ::c_int = 70; -pub const _SC_GETPW_R_SIZE_MAX: ::c_int = 71; -pub const _SC_LOGIN_NAME_MAX: ::c_int = 73; -pub const _SC_MQ_PRIO_MAX: ::c_int = 75; -pub const _SC_NPROCESSORS_ONLN: ::c_int = 58; -pub const _SC_THREAD_ATTR_STACKADDR: ::c_int = 82; -pub const _SC_THREAD_ATTR_STACKSIZE: ::c_int = 83; -pub const _SC_THREAD_DESTRUCTOR_ITERATIONS: ::c_int = 85; -pub const _SC_THREAD_KEYS_MAX: ::c_int = 86; -pub const _SC_THREAD_PRIO_INHERIT: ::c_int = 87; -pub const _SC_THREAD_PRIO_PROTECT: ::c_int = 88; -pub const _SC_THREAD_PRIORITY_SCHEDULING: ::c_int = 89; -pub const _SC_THREAD_PROCESS_SHARED: ::c_int = 90; -pub const _SC_THREAD_SAFE_FUNCTIONS: ::c_int = 91; -pub const _SC_THREAD_STACK_MIN: ::c_int = 93; -pub const _SC_THREAD_THREADS_MAX: ::c_int = 94; -pub const _SC_THREADS: ::c_int = 96; -pub const _SC_TTY_NAME_MAX: ::c_int = 101; -pub const _SC_ATEXIT_MAX: ::c_int = 107; -pub const _SC_XOPEN_CRYPT: ::c_int = 108; -pub const _SC_XOPEN_ENH_I18N: ::c_int = 109; -pub const _SC_XOPEN_LEGACY: ::c_int = 110; -pub const _SC_XOPEN_REALTIME: ::c_int = 111; -pub const _SC_XOPEN_REALTIME_THREADS: ::c_int = 112; -pub const _SC_XOPEN_SHM: ::c_int = 113; -pub const _SC_XOPEN_UNIX: ::c_int = 115; -pub const _SC_XOPEN_VERSION: ::c_int = 116; -pub const _SC_XOPEN_XCU_VERSION: ::c_int = 
117; - -pub const PTHREAD_CREATE_JOINABLE: ::c_int = 0; -pub const PTHREAD_CREATE_DETACHED: ::c_int = 1; - -pub const RLIMIT_CPU: ::c_int = 0; -pub const RLIMIT_FSIZE: ::c_int = 1; -pub const RLIMIT_DATA: ::c_int = 2; -pub const RLIMIT_STACK: ::c_int = 3; -pub const RLIMIT_CORE: ::c_int = 4; -pub const RLIMIT_RSS: ::c_int = 5; -pub const RLIMIT_MEMLOCK: ::c_int = 6; -pub const RLIMIT_NPROC: ::c_int = 7; -pub const RLIMIT_NOFILE: ::c_int = 8; -pub const RLIMIT_SBSIZE: ::c_int = 9; -pub const RLIMIT_VMEM: ::c_int = 10; -pub const RLIMIT_AS: ::c_int = RLIMIT_VMEM; -pub const RLIM_INFINITY: rlim_t = 0x7fff_ffff_ffff_ffff; - -pub const RUSAGE_SELF: ::c_int = 0; -pub const RUSAGE_CHILDREN: ::c_int = -1; - -pub const MADV_NORMAL: ::c_int = 0; -pub const MADV_RANDOM: ::c_int = 1; -pub const MADV_SEQUENTIAL: ::c_int = 2; -pub const MADV_WILLNEED: ::c_int = 3; -pub const MADV_DONTNEED: ::c_int = 4; -pub const MADV_FREE: ::c_int = 5; -pub const MADV_NOSYNC: ::c_int = 6; -pub const MADV_AUTOSYNC: ::c_int = 7; -pub const MADV_NOCORE: ::c_int = 8; -pub const MADV_CORE: ::c_int = 9; - -pub const MINCORE_INCORE: ::c_int = 0x1; -pub const MINCORE_REFERENCED: ::c_int = 0x2; -pub const MINCORE_MODIFIED: ::c_int = 0x4; -pub const MINCORE_REFERENCED_OTHER: ::c_int = 0x8; -pub const MINCORE_MODIFIED_OTHER: ::c_int = 0x10; -pub const MINCORE_SUPER: ::c_int = 0x20; - -pub const AF_INET: ::c_int = 2; -pub const AF_INET6: ::c_int = 28; -pub const AF_UNIX: ::c_int = 1; -pub const SOCK_STREAM: ::c_int = 1; -pub const SOCK_DGRAM: ::c_int = 2; -pub const SOCK_RAW: ::c_int = 3; -pub const SOCK_SEQPACKET: ::c_int = 5; -pub const IPPROTO_TCP: ::c_int = 6; -pub const IPPROTO_IP: ::c_int = 0; -pub const IPPROTO_IPV6: ::c_int = 41; -pub const IP_MULTICAST_TTL: ::c_int = 10; -pub const IP_MULTICAST_LOOP: ::c_int = 11; -pub const IP_TTL: ::c_int = 4; -pub const IP_HDRINCL: ::c_int = 2; -pub const IP_ADD_MEMBERSHIP: ::c_int = 12; -pub const IP_DROP_MEMBERSHIP: ::c_int = 13; -pub const IPV6_JOIN_GROUP: 
::c_int = 12; -pub const IPV6_LEAVE_GROUP: ::c_int = 13; - -pub const TCP_NODELAY: ::c_int = 1; -pub const TCP_KEEPIDLE: ::c_int = 256; -pub const SOL_SOCKET: ::c_int = 0xffff; -pub const SO_DEBUG: ::c_int = 0x01; -pub const SO_ACCEPTCONN: ::c_int = 0x0002; -pub const SO_REUSEADDR: ::c_int = 0x0004; -pub const SO_KEEPALIVE: ::c_int = 0x0008; -pub const SO_DONTROUTE: ::c_int = 0x0010; -pub const SO_BROADCAST: ::c_int = 0x0020; -pub const SO_USELOOPBACK: ::c_int = 0x0040; -pub const SO_LINGER: ::c_int = 0x0080; -pub const SO_OOBINLINE: ::c_int = 0x0100; -pub const SO_REUSEPORT: ::c_int = 0x0200; -pub const SO_SNDBUF: ::c_int = 0x1001; -pub const SO_RCVBUF: ::c_int = 0x1002; -pub const SO_SNDLOWAT: ::c_int = 0x1003; -pub const SO_RCVLOWAT: ::c_int = 0x1004; -pub const SO_SNDTIMEO: ::c_int = 0x1005; -pub const SO_RCVTIMEO: ::c_int = 0x1006; -pub const SO_ERROR: ::c_int = 0x1007; -pub const SO_TYPE: ::c_int = 0x1008; - -pub const IFF_LOOPBACK: ::c_int = 0x8; - -pub const SHUT_RD: ::c_int = 0; -pub const SHUT_WR: ::c_int = 1; -pub const SHUT_RDWR: ::c_int = 2; - -pub const LOCK_SH: ::c_int = 1; -pub const LOCK_EX: ::c_int = 2; -pub const LOCK_NB: ::c_int = 4; -pub const LOCK_UN: ::c_int = 8; - -pub const O_SYNC: ::c_int = 128; -pub const O_NONBLOCK: ::c_int = 4; - -pub const MAP_COPY: ::c_int = 0x0002; -pub const MAP_RENAME: ::c_int = 0x0020; -pub const MAP_NORESERVE: ::c_int = 0x0040; -pub const MAP_HASSEMAPHORE: ::c_int = 0x0200; -pub const MAP_STACK: ::c_int = 0x0400; -pub const MAP_NOSYNC: ::c_int = 0x0800; -pub const MAP_NOCORE: ::c_int = 0x020000; - -pub const IPPROTO_RAW: ::c_int = 255; - -pub const _SC_ARG_MAX: ::c_int = 1; -pub const _SC_CHILD_MAX: ::c_int = 2; -pub const _SC_CLK_TCK: ::c_int = 3; -pub const _SC_NGROUPS_MAX: ::c_int = 4; -pub const _SC_OPEN_MAX: ::c_int = 5; -pub const _SC_JOB_CONTROL: ::c_int = 6; -pub const _SC_SAVED_IDS: ::c_int = 7; -pub const _SC_VERSION: ::c_int = 8; -pub const _SC_BC_BASE_MAX: ::c_int = 9; -pub const _SC_BC_DIM_MAX: 
::c_int = 10; -pub const _SC_BC_SCALE_MAX: ::c_int = 11; -pub const _SC_BC_STRING_MAX: ::c_int = 12; -pub const _SC_COLL_WEIGHTS_MAX: ::c_int = 13; -pub const _SC_EXPR_NEST_MAX: ::c_int = 14; -pub const _SC_LINE_MAX: ::c_int = 15; -pub const _SC_RE_DUP_MAX: ::c_int = 16; -pub const _SC_2_VERSION: ::c_int = 17; -pub const _SC_2_C_BIND: ::c_int = 18; -pub const _SC_2_C_DEV: ::c_int = 19; -pub const _SC_2_CHAR_TERM: ::c_int = 20; -pub const _SC_2_FORT_DEV: ::c_int = 21; -pub const _SC_2_FORT_RUN: ::c_int = 22; -pub const _SC_2_LOCALEDEF: ::c_int = 23; -pub const _SC_2_SW_DEV: ::c_int = 24; -pub const _SC_2_UPE: ::c_int = 25; -pub const _SC_STREAM_MAX: ::c_int = 26; -pub const _SC_TZNAME_MAX: ::c_int = 27; -pub const _SC_ASYNCHRONOUS_IO: ::c_int = 28; -pub const _SC_MAPPED_FILES: ::c_int = 29; -pub const _SC_MEMLOCK: ::c_int = 30; -pub const _SC_MEMLOCK_RANGE: ::c_int = 31; -pub const _SC_MEMORY_PROTECTION: ::c_int = 32; -pub const _SC_MESSAGE_PASSING: ::c_int = 33; -pub const _SC_PRIORITIZED_IO: ::c_int = 34; -pub const _SC_PRIORITY_SCHEDULING: ::c_int = 35; -pub const _SC_REALTIME_SIGNALS: ::c_int = 36; -pub const _SC_SEMAPHORES: ::c_int = 37; -pub const _SC_FSYNC: ::c_int = 38; -pub const _SC_SHARED_MEMORY_OBJECTS: ::c_int = 39; -pub const _SC_SYNCHRONIZED_IO: ::c_int = 40; -pub const _SC_TIMERS: ::c_int = 41; -pub const _SC_AIO_LISTIO_MAX: ::c_int = 42; -pub const _SC_AIO_MAX: ::c_int = 43; -pub const _SC_AIO_PRIO_DELTA_MAX: ::c_int = 44; -pub const _SC_DELAYTIMER_MAX: ::c_int = 45; -pub const _SC_MQ_OPEN_MAX: ::c_int = 46; -pub const _SC_PAGESIZE: ::c_int = 47; -pub const _SC_PAGE_SIZE: ::c_int = _SC_PAGESIZE; -pub const _SC_RTSIG_MAX: ::c_int = 48; -pub const _SC_SEM_NSEMS_MAX: ::c_int = 49; -pub const _SC_SEM_VALUE_MAX: ::c_int = 50; -pub const _SC_SIGQUEUE_MAX: ::c_int = 51; -pub const _SC_TIMER_MAX: ::c_int = 52; -pub const _SC_HOST_NAME_MAX: ::c_int = 72; - -pub const PTHREAD_MUTEX_INITIALIZER: pthread_mutex_t = 0 as *mut _; -pub const 
PTHREAD_COND_INITIALIZER: pthread_cond_t = 0 as *mut _; -pub const PTHREAD_RWLOCK_INITIALIZER: pthread_rwlock_t = 0 as *mut _; -pub const PTHREAD_MUTEX_ERRORCHECK: ::c_int = 1; -pub const PTHREAD_MUTEX_RECURSIVE: ::c_int = 2; -pub const PTHREAD_MUTEX_NORMAL: ::c_int = 3; -pub const PTHREAD_MUTEX_ADAPTIVE_NP: ::c_int = 4; -pub const PTHREAD_MUTEX_DEFAULT: ::c_int = PTHREAD_MUTEX_ERRORCHECK; - -pub const SCHED_FIFO: ::c_int = 1; -pub const SCHED_OTHER: ::c_int = 2; -pub const SCHED_RR: ::c_int = 3; - -pub const FD_SETSIZE: usize = 1024; - -pub const ST_NOSUID: ::c_ulong = 2; - -pub const NI_MAXHOST: ::size_t = 1025; - -pub const RTLD_LOCAL: ::c_int = 0; -pub const RTLD_NODELETE: ::c_int = 0x1000; -pub const RTLD_NOLOAD: ::c_int = 0x2000; -pub const RTLD_GLOBAL: ::c_int = 0x100; - -pub const LOG_NTP: ::c_int = 12 << 3; -pub const LOG_SECURITY: ::c_int = 13 << 3; -pub const LOG_CONSOLE: ::c_int = 14 << 3; -pub const LOG_NFACILITIES: ::c_int = 24; - -pub const TIOCGWINSZ: ::c_ulong = 0x40087468; -pub const TIOCSWINSZ: ::c_ulong = 0x80087467; - -pub const SEM_FAILED: *mut sem_t = 0 as *mut sem_t; - -f! 
{ - pub fn WSTOPSIG(status: ::c_int) -> ::c_int { - status >> 8 - } - - pub fn WIFSIGNALED(status: ::c_int) -> bool { - (status & 0o177) != 0o177 && (status & 0o177) != 0 - } - - pub fn WIFSTOPPED(status: ::c_int) -> bool { - (status & 0o177) == 0o177 - } -} - -extern { - pub fn lutimes(file: *const ::c_char, times: *const ::timeval) -> ::c_int; - pub fn endutxent(); - pub fn getutxent() -> *mut utmpx; - pub fn getutxid(ut: *const utmpx) -> *mut utmpx; - pub fn getutxline(ut: *const utmpx) -> *mut utmpx; - pub fn pututxline(ut: *const utmpx) -> *mut utmpx; - pub fn setutxent(); - pub fn getutxuser(user: *const ::c_char) -> *mut utmpx; - pub fn setutxdb(_type: ::c_int, file: *const ::c_char) -> ::c_int; -} - -#[link(name = "util")] -extern { - pub fn aio_read(aiocbp: *mut aiocb) -> ::c_int; - pub fn aio_write(aiocbp: *mut aiocb) -> ::c_int; - pub fn aio_fsync(op: ::c_int, aiocbp: *mut aiocb) -> ::c_int; - pub fn aio_error(aiocbp: *const aiocb) -> ::c_int; - pub fn aio_return(aiocbp: *mut aiocb) -> ::ssize_t; - pub fn aio_suspend(aiocb_list: *const *const aiocb, nitems: ::c_int, - timeout: *const ::timespec) -> ::c_int; - pub fn aio_cancel(fd: ::c_int, aiocbp: *mut aiocb) -> ::c_int; - pub fn lio_listio(mode: ::c_int, aiocb_list: *const *mut aiocb, - nitems: ::c_int, sevp: *mut sigevent) -> ::c_int; - pub fn aio_waitcomplete(iocbp: *mut *mut aiocb, - timeout: *mut ::timespec) -> ::ssize_t; - - pub fn getnameinfo(sa: *const ::sockaddr, - salen: ::socklen_t, - host: *mut ::c_char, - hostlen: ::size_t, - serv: *mut ::c_char, - servlen: ::size_t, - flags: ::c_int) -> ::c_int; - pub fn kevent(kq: ::c_int, - changelist: *const ::kevent, - nchanges: ::c_int, - eventlist: *mut ::kevent, - nevents: ::c_int, - timeout: *const ::timespec) -> ::c_int; - pub fn mincore(addr: *const ::c_void, len: ::size_t, - vec: *mut ::c_char) -> ::c_int; - pub fn sysctlnametomib(name: *const ::c_char, - mibp: *mut ::c_int, - sizep: *mut ::size_t) - -> ::c_int; - pub fn shm_open(name: *const 
::c_char, oflag: ::c_int, mode: ::mode_t) - -> ::c_int; - pub fn sysctl(name: *const ::c_int, - namelen: ::c_uint, - oldp: *mut ::c_void, - oldlenp: *mut ::size_t, - newp: *const ::c_void, - newlen: ::size_t) - -> ::c_int; - pub fn sysctlbyname(name: *const ::c_char, - oldp: *mut ::c_void, - oldlenp: *mut ::size_t, - newp: *const ::c_void, - newlen: ::size_t) - -> ::c_int; - pub fn sched_setscheduler(pid: ::pid_t, - policy: ::c_int, - param: *const sched_param) -> ::c_int; - pub fn sched_getscheduler(pid: ::pid_t) -> ::c_int; - pub fn memrchr(cx: *const ::c_void, - c: ::c_int, - n: ::size_t) -> *mut ::c_void; - pub fn sendfile(fd: ::c_int, - s: ::c_int, - offset: ::off_t, - nbytes: ::size_t, - hdtr: *mut ::sf_hdtr, - sbytes: *mut ::off_t, - flags: ::c_int) -> ::c_int; - pub fn sigtimedwait(set: *const sigset_t, - info: *mut siginfo_t, - timeout: *const ::timespec) -> ::c_int; - pub fn sigwaitinfo(set: *const sigset_t, - info: *mut siginfo_t) -> ::c_int; - pub fn openpty(amaster: *mut ::c_int, - aslave: *mut ::c_int, - name: *mut ::c_char, - termp: *mut termios, - winp: *mut ::winsize) -> ::c_int; - pub fn forkpty(amaster: *mut ::c_int, - name: *mut ::c_char, - termp: *mut termios, - winp: *mut ::winsize) -> ::pid_t; - pub fn nl_langinfo_l(item: ::nl_item, locale: ::locale_t) -> *mut ::c_char; - pub fn duplocale(base: ::locale_t) -> ::locale_t; - pub fn freelocale(loc: ::locale_t) -> ::c_int; - pub fn newlocale(mask: ::c_int, - locale: *const ::c_char, - base: ::locale_t) -> ::locale_t; - pub fn uselocale(loc: ::locale_t) -> ::locale_t; - pub fn querylocale(mask: ::c_int, loc: ::locale_t) -> *const ::c_char; - pub fn pthread_set_name_np(tid: ::pthread_t, name: *const ::c_char); - pub fn pthread_attr_get_np(tid: ::pthread_t, - attr: *mut ::pthread_attr_t) -> ::c_int; - pub fn pthread_attr_getguardsize(attr: *const ::pthread_attr_t, - guardsize: *mut ::size_t) -> ::c_int; - pub fn pthread_attr_getstack(attr: *const ::pthread_attr_t, - stackaddr: *mut *mut ::c_void, - 
stacksize: *mut ::size_t) -> ::c_int; - pub fn getpriority(which: ::c_int, who: ::c_int) -> ::c_int; - pub fn setpriority(which: ::c_int, who: ::c_int, prio: ::c_int) -> ::c_int; - - pub fn openat(dirfd: ::c_int, pathname: *const ::c_char, - flags: ::c_int, ...) -> ::c_int; - pub fn faccessat(dirfd: ::c_int, pathname: *const ::c_char, - mode: ::c_int, flags: ::c_int) -> ::c_int; - pub fn fchmodat(dirfd: ::c_int, pathname: *const ::c_char, - mode: ::mode_t, flags: ::c_int) -> ::c_int; - pub fn fchownat(dirfd: ::c_int, pathname: *const ::c_char, - owner: ::uid_t, group: ::gid_t, - flags: ::c_int) -> ::c_int; - pub fn fstatat(dirfd: ::c_int, pathname: *const ::c_char, - buf: *mut stat, flags: ::c_int) -> ::c_int; - pub fn linkat(olddirfd: ::c_int, oldpath: *const ::c_char, - newdirfd: ::c_int, newpath: *const ::c_char, - flags: ::c_int) -> ::c_int; - pub fn mkdirat(dirfd: ::c_int, pathname: *const ::c_char, - mode: ::mode_t) -> ::c_int; - pub fn mknodat(dirfd: ::c_int, pathname: *const ::c_char, - mode: ::mode_t, dev: dev_t) -> ::c_int; - pub fn readlinkat(dirfd: ::c_int, pathname: *const ::c_char, - buf: *mut ::c_char, bufsiz: ::size_t) -> ::ssize_t; - pub fn renameat(olddirfd: ::c_int, oldpath: *const ::c_char, - newdirfd: ::c_int, newpath: *const ::c_char) - -> ::c_int; - pub fn symlinkat(target: *const ::c_char, newdirfd: ::c_int, - linkpath: *const ::c_char) -> ::c_int; - pub fn unlinkat(dirfd: ::c_int, pathname: *const ::c_char, - flags: ::c_int) -> ::c_int; - pub fn mkfifoat(dirfd: ::c_int, pathname: *const ::c_char, - mode: ::mode_t) -> ::c_int; - pub fn pthread_condattr_getclock(attr: *const pthread_condattr_t, - clock_id: *mut clockid_t) -> ::c_int; - pub fn pthread_condattr_setclock(attr: *mut pthread_condattr_t, - clock_id: clockid_t) -> ::c_int; - pub fn sethostname(name: *const ::c_char, len: ::c_int) -> ::c_int; - pub fn sem_timedwait(sem: *mut sem_t, - abstime: *const ::timespec) -> ::c_int; - pub fn pthread_mutex_timedlock(lock: *mut pthread_mutex_t, 
- abstime: *const ::timespec) -> ::c_int; -} - -cfg_if! { - if #[cfg(target_os = "freebsd")] { - mod freebsd; - pub use self::freebsd::*; - } else if #[cfg(target_os = "dragonfly")] { - mod dragonfly; - pub use self::dragonfly::*; - } else { - // ... - } -} diff -Nru cargo-0.17.0/vendor/libc-0.2.18/src/unix/bsd/mod.rs cargo-0.19.0/vendor/libc-0.2.18/src/unix/bsd/mod.rs --- cargo-0.17.0/vendor/libc-0.2.18/src/unix/bsd/mod.rs 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/libc-0.2.18/src/unix/bsd/mod.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,368 +0,0 @@ -use dox::mem; - -pub type c_char = i8; -pub type wchar_t = i32; -pub type off_t = i64; -pub type useconds_t = u32; -pub type blkcnt_t = i64; -pub type socklen_t = u32; -pub type sa_family_t = u8; -pub type pthread_t = ::uintptr_t; -pub type nfds_t = ::c_uint; - -s! { - pub struct sockaddr { - pub sa_len: u8, - pub sa_family: sa_family_t, - pub sa_data: [::c_char; 14], - } - - pub struct sockaddr_in6 { - pub sin6_len: u8, - pub sin6_family: sa_family_t, - pub sin6_port: ::in_port_t, - pub sin6_flowinfo: u32, - pub sin6_addr: ::in6_addr, - pub sin6_scope_id: u32, - } - - pub struct sockaddr_un { - pub sun_len: u8, - pub sun_family: sa_family_t, - pub sun_path: [c_char; 104] - } - - pub struct passwd { - pub pw_name: *mut ::c_char, - pub pw_passwd: *mut ::c_char, - pub pw_uid: ::uid_t, - pub pw_gid: ::gid_t, - pub pw_change: ::time_t, - pub pw_class: *mut ::c_char, - pub pw_gecos: *mut ::c_char, - pub pw_dir: *mut ::c_char, - pub pw_shell: *mut ::c_char, - pub pw_expire: ::time_t, - - #[cfg(not(any(target_os = "macos", - target_os = "ios", - target_os = "netbsd", - target_os = "openbsd")))] - pub pw_fields: ::c_int, - } - - pub struct ifaddrs { - pub ifa_next: *mut ifaddrs, - pub ifa_name: *mut ::c_char, - pub ifa_flags: ::c_uint, - pub ifa_addr: *mut ::sockaddr, - pub ifa_netmask: *mut ::sockaddr, - pub ifa_dstaddr: *mut ::sockaddr, - pub ifa_data: *mut ::c_void - } - - pub struct fd_set { - 
#[cfg(all(target_pointer_width = "64", - any(target_os = "freebsd", target_os = "dragonfly")))] - fds_bits: [i64; FD_SETSIZE / 64], - #[cfg(not(all(target_pointer_width = "64", - any(target_os = "freebsd", target_os = "dragonfly"))))] - fds_bits: [i32; FD_SETSIZE / 32], - } - - pub struct tm { - pub tm_sec: ::c_int, - pub tm_min: ::c_int, - pub tm_hour: ::c_int, - pub tm_mday: ::c_int, - pub tm_mon: ::c_int, - pub tm_year: ::c_int, - pub tm_wday: ::c_int, - pub tm_yday: ::c_int, - pub tm_isdst: ::c_int, - pub tm_gmtoff: ::c_long, - pub tm_zone: *mut ::c_char, - } - - pub struct utsname { - #[cfg(not(target_os = "dragonfly"))] - pub sysname: [::c_char; 256], - #[cfg(target_os = "dragonfly")] - pub sysname: [::c_char; 32], - #[cfg(not(target_os = "dragonfly"))] - pub nodename: [::c_char; 256], - #[cfg(target_os = "dragonfly")] - pub nodename: [::c_char; 32], - #[cfg(not(target_os = "dragonfly"))] - pub release: [::c_char; 256], - #[cfg(target_os = "dragonfly")] - pub release: [::c_char; 32], - #[cfg(not(target_os = "dragonfly"))] - pub version: [::c_char; 256], - #[cfg(target_os = "dragonfly")] - pub version: [::c_char; 32], - #[cfg(not(target_os = "dragonfly"))] - pub machine: [::c_char; 256], - #[cfg(target_os = "dragonfly")] - pub machine: [::c_char; 32], - } - - pub struct msghdr { - pub msg_name: *mut ::c_void, - pub msg_namelen: ::socklen_t, - pub msg_iov: *mut ::iovec, - pub msg_iovlen: ::c_int, - pub msg_control: *mut ::c_void, - pub msg_controllen: ::socklen_t, - pub msg_flags: ::c_int, - } - - pub struct fsid_t { - __fsid_val: [::int32_t; 2], - } - - pub struct if_nameindex { - pub if_index: ::c_uint, - pub if_name: *mut ::c_char, - } -} - -pub const LC_ALL: ::c_int = 0; -pub const LC_COLLATE: ::c_int = 1; -pub const LC_CTYPE: ::c_int = 2; -pub const LC_MONETARY: ::c_int = 3; -pub const LC_NUMERIC: ::c_int = 4; -pub const LC_TIME: ::c_int = 5; -pub const LC_MESSAGES: ::c_int = 6; - -pub const FIOCLEX: ::c_ulong = 0x20006601; -pub const FIONBIO: ::c_ulong = 
0x8004667e; - -pub const PATH_MAX: ::c_int = 1024; - -pub const SA_ONSTACK: ::c_int = 0x0001; -pub const SA_SIGINFO: ::c_int = 0x0040; -pub const SA_RESTART: ::c_int = 0x0002; -pub const SA_RESETHAND: ::c_int = 0x0004; -pub const SA_NOCLDSTOP: ::c_int = 0x0008; -pub const SA_NODEFER: ::c_int = 0x0010; -pub const SA_NOCLDWAIT: ::c_int = 0x0020; - -pub const SS_ONSTACK: ::c_int = 1; -pub const SS_DISABLE: ::c_int = 4; - -pub const SIGCHLD: ::c_int = 20; -pub const SIGBUS: ::c_int = 10; -pub const SIGUSR1: ::c_int = 30; -pub const SIGUSR2: ::c_int = 31; -pub const SIGCONT: ::c_int = 19; -pub const SIGSTOP: ::c_int = 17; -pub const SIGTSTP: ::c_int = 18; -pub const SIGURG: ::c_int = 16; -pub const SIGIO: ::c_int = 23; -pub const SIGSYS: ::c_int = 12; -pub const SIGTTIN: ::c_int = 21; -pub const SIGTTOU: ::c_int = 22; -pub const SIGXCPU: ::c_int = 24; -pub const SIGXFSZ: ::c_int = 25; -pub const SIGVTALRM: ::c_int = 26; -pub const SIGPROF: ::c_int = 27; -pub const SIGWINCH: ::c_int = 28; -pub const SIGINFO: ::c_int = 29; - -pub const SIG_SETMASK: ::c_int = 3; -pub const SIG_BLOCK: ::c_int = 0x1; -pub const SIG_UNBLOCK: ::c_int = 0x2; - -pub const IPV6_MULTICAST_LOOP: ::c_int = 11; -pub const IPV6_V6ONLY: ::c_int = 27; - -pub const ST_RDONLY: ::c_ulong = 1; - -pub const NCCS: usize = 20; - -pub const O_ASYNC: ::c_int = 0x40; -pub const O_FSYNC: ::c_int = 0x80; -pub const O_NDELAY: ::c_int = 0x4; -pub const O_NOFOLLOW: ::c_int = 0x100; - -pub const F_GETOWN: ::c_int = 5; -pub const F_SETOWN: ::c_int = 6; - -pub const MNT_FORCE: ::c_int = 0x80000; - -pub const Q_SYNC: ::c_int = 0x600; -pub const Q_QUOTAON: ::c_int = 0x100; -pub const Q_QUOTAOFF: ::c_int = 0x200; - -pub const TCIOFF: ::c_int = 3; -pub const TCION: ::c_int = 4; -pub const TCOOFF: ::c_int = 1; -pub const TCOON: ::c_int = 2; -pub const TCIFLUSH: ::c_int = 1; -pub const TCOFLUSH: ::c_int = 2; -pub const TCIOFLUSH: ::c_int = 3; -pub const TCSANOW: ::c_int = 0; -pub const TCSADRAIN: ::c_int = 1; -pub const 
TCSAFLUSH: ::c_int = 2; -pub const VEOF: usize = 0; -pub const VEOL: usize = 1; -pub const VEOL2: usize = 2; -pub const VERASE: usize = 3; -pub const VWERASE: usize = 4; -pub const VKILL: usize = 5; -pub const VREPRINT: usize = 6; -pub const VINTR: usize = 8; -pub const VQUIT: usize = 9; -pub const VSUSP: usize = 10; -pub const VSTART: usize = 12; -pub const VSTOP: usize = 13; -pub const VLNEXT: usize = 14; -pub const VDISCARD: usize = 15; -pub const VMIN: usize = 16; -pub const VTIME: usize = 17; -pub const IGNBRK: ::tcflag_t = 0x00000001; -pub const BRKINT: ::tcflag_t = 0x00000002; -pub const IGNPAR: ::tcflag_t = 0x00000004; -pub const PARMRK: ::tcflag_t = 0x00000008; -pub const INPCK: ::tcflag_t = 0x00000010; -pub const ISTRIP: ::tcflag_t = 0x00000020; -pub const INLCR: ::tcflag_t = 0x00000040; -pub const IGNCR: ::tcflag_t = 0x00000080; -pub const ICRNL: ::tcflag_t = 0x00000100; -pub const IXON: ::tcflag_t = 0x00000200; -pub const IXOFF: ::tcflag_t = 0x00000400; -pub const IXANY: ::tcflag_t = 0x00000800; -pub const IMAXBEL: ::tcflag_t = 0x00002000; -pub const OPOST: ::tcflag_t = 0x1; -pub const ONLCR: ::tcflag_t = 0x2; -pub const CSIZE: ::tcflag_t = 0x00000300; -pub const CS5: ::tcflag_t = 0x00000000; -pub const CS6: ::tcflag_t = 0x00000100; -pub const CS7: ::tcflag_t = 0x00000200; -pub const CS8: ::tcflag_t = 0x00000300; -pub const CSTOPB: ::tcflag_t = 0x00000400; -pub const CREAD: ::tcflag_t = 0x00000800; -pub const PARENB: ::tcflag_t = 0x00001000; -pub const PARODD: ::tcflag_t = 0x00002000; -pub const HUPCL: ::tcflag_t = 0x00004000; -pub const CLOCAL: ::tcflag_t = 0x00008000; -pub const ECHOKE: ::tcflag_t = 0x00000001; -pub const ECHOE: ::tcflag_t = 0x00000002; -pub const ECHOK: ::tcflag_t = 0x00000004; -pub const ECHO: ::tcflag_t = 0x00000008; -pub const ECHONL: ::tcflag_t = 0x00000010; -pub const ECHOPRT: ::tcflag_t = 0x00000020; -pub const ECHOCTL: ::tcflag_t = 0x00000040; -pub const ISIG: ::tcflag_t = 0x00000080; -pub const ICANON: ::tcflag_t = 
0x00000100; -pub const IEXTEN: ::tcflag_t = 0x00000400; -pub const EXTPROC: ::tcflag_t = 0x00000800; -pub const TOSTOP: ::tcflag_t = 0x00400000; -pub const FLUSHO: ::tcflag_t = 0x00800000; -pub const PENDIN: ::tcflag_t = 0x20000000; -pub const NOFLSH: ::tcflag_t = 0x80000000; - -pub const WNOHANG: ::c_int = 0x00000001; -pub const WUNTRACED: ::c_int = 0x00000002; - -pub const RTLD_NOW: ::c_int = 0x2; -pub const RTLD_DEFAULT: *mut ::c_void = -2isize as *mut ::c_void; - -pub const LOG_CRON: ::c_int = 9 << 3; -pub const LOG_AUTHPRIV: ::c_int = 10 << 3; -pub const LOG_FTP: ::c_int = 11 << 3; -pub const LOG_PERROR: ::c_int = 0x20; - -pub const PIPE_BUF: usize = 512; - -f! { - pub fn FD_CLR(fd: ::c_int, set: *mut fd_set) -> () { - let bits = mem::size_of_val(&(*set).fds_bits[0]) * 8; - let fd = fd as usize; - (*set).fds_bits[fd / bits] &= !(1 << (fd % bits)); - return - } - - pub fn FD_ISSET(fd: ::c_int, set: *mut fd_set) -> bool { - let bits = mem::size_of_val(&(*set).fds_bits[0]) * 8; - let fd = fd as usize; - return ((*set).fds_bits[fd / bits] & (1 << (fd % bits))) != 0 - } - - pub fn FD_SET(fd: ::c_int, set: *mut fd_set) -> () { - let bits = mem::size_of_val(&(*set).fds_bits[0]) * 8; - let fd = fd as usize; - (*set).fds_bits[fd / bits] |= 1 << (fd % bits); - return - } - - pub fn FD_ZERO(set: *mut fd_set) -> () { - for slot in (*set).fds_bits.iter_mut() { - *slot = 0; - } - } - - pub fn WTERMSIG(status: ::c_int) -> ::c_int { - status & 0o177 - } - - pub fn WIFEXITED(status: ::c_int) -> bool { - (status & 0o177) == 0 - } - - pub fn WEXITSTATUS(status: ::c_int) -> ::c_int { - status >> 8 - } - - pub fn WCOREDUMP(status: ::c_int) -> bool { - (status & 0o200) != 0 - } -} - -extern { - pub fn getifaddrs(ifap: *mut *mut ::ifaddrs) -> ::c_int; - pub fn freeifaddrs(ifa: *mut ::ifaddrs); - pub fn setgroups(ngroups: ::c_int, - ptr: *const ::gid_t) -> ::c_int; - pub fn ioctl(fd: ::c_int, request: ::c_ulong, ...) 
-> ::c_int; - pub fn kqueue() -> ::c_int; - pub fn unmount(target: *const ::c_char, arg: ::c_int) -> ::c_int; - pub fn syscall(num: ::c_int, ...) -> ::c_int; - #[cfg_attr(target_os = "netbsd", link_name = "__getpwnam_r50")] - pub fn getpwnam_r(name: *const ::c_char, - pwd: *mut passwd, - buf: *mut ::c_char, - buflen: ::size_t, - result: *mut *mut passwd) -> ::c_int; - #[cfg_attr(target_os = "netbsd", link_name = "__getpwuid_r50")] - pub fn getpwuid_r(uid: ::uid_t, - pwd: *mut passwd, - buf: *mut ::c_char, - buflen: ::size_t, - result: *mut *mut passwd) -> ::c_int; - #[cfg_attr(target_os = "netbsd", link_name = "__getpwent50")] - pub fn getpwent() -> *mut passwd; - pub fn setpwent(); - pub fn getprogname() -> *const ::c_char; - pub fn setprogname(name: *const ::c_char); - pub fn getloadavg(loadavg: *mut ::c_double, nelem: ::c_int) -> ::c_int; - pub fn if_nameindex() -> *mut if_nameindex; - pub fn if_freenameindex(ptr: *mut if_nameindex); -} - -cfg_if! { - if #[cfg(any(target_os = "macos", target_os = "ios"))] { - mod apple; - pub use self::apple::*; - } else if #[cfg(any(target_os = "openbsd", target_os = "netbsd", - target_os = "bitrig"))] { - mod netbsdlike; - pub use self::netbsdlike::*; - } else if #[cfg(any(target_os = "freebsd", target_os = "dragonfly"))] { - mod freebsdlike; - pub use self::freebsdlike::*; - } else { - // Unknown target_os - } -} diff -Nru cargo-0.17.0/vendor/libc-0.2.18/src/unix/bsd/netbsdlike/mod.rs cargo-0.19.0/vendor/libc-0.2.18/src/unix/bsd/netbsdlike/mod.rs --- cargo-0.17.0/vendor/libc-0.2.18/src/unix/bsd/netbsdlike/mod.rs 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/libc-0.2.18/src/unix/bsd/netbsdlike/mod.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,547 +0,0 @@ -pub type time_t = i64; -pub type mode_t = u32; -pub type nlink_t = ::uint32_t; -pub type ino_t = ::uint64_t; -pub type pthread_key_t = ::c_int; -pub type rlim_t = u64; -pub type speed_t = ::c_uint; -pub type tcflag_t = ::c_uint; -pub type nl_item = c_long; -pub 
type clockid_t = ::c_int; -pub type id_t = ::uint32_t; -pub type sem_t = *mut sem; - -pub enum timezone {} -pub enum sem {} - -s! { - pub struct sigaction { - pub sa_sigaction: ::sighandler_t, - pub sa_mask: ::sigset_t, - pub sa_flags: ::c_int, - } - - pub struct stack_t { - pub ss_sp: *mut ::c_void, - pub ss_size: ::size_t, - pub ss_flags: ::c_int, - } - - pub struct sockaddr_in { - pub sin_len: u8, - pub sin_family: ::sa_family_t, - pub sin_port: ::in_port_t, - pub sin_addr: ::in_addr, - pub sin_zero: [::int8_t; 8], - } - - pub struct termios { - pub c_iflag: ::tcflag_t, - pub c_oflag: ::tcflag_t, - pub c_cflag: ::tcflag_t, - pub c_lflag: ::tcflag_t, - pub c_cc: [::cc_t; ::NCCS], - pub c_ispeed: ::c_int, - pub c_ospeed: ::c_int, - } - - pub struct flock { - pub l_start: ::off_t, - pub l_len: ::off_t, - pub l_pid: ::pid_t, - pub l_type: ::c_short, - pub l_whence: ::c_short, - } -} - -pub const D_T_FMT: ::nl_item = 0; -pub const D_FMT: ::nl_item = 1; -pub const T_FMT: ::nl_item = 2; -pub const T_FMT_AMPM: ::nl_item = 3; -pub const AM_STR: ::nl_item = 4; -pub const PM_STR: ::nl_item = 5; - -pub const DAY_1: ::nl_item = 6; -pub const DAY_2: ::nl_item = 7; -pub const DAY_3: ::nl_item = 8; -pub const DAY_4: ::nl_item = 9; -pub const DAY_5: ::nl_item = 10; -pub const DAY_6: ::nl_item = 11; -pub const DAY_7: ::nl_item = 12; - -pub const ABDAY_1: ::nl_item = 13; -pub const ABDAY_2: ::nl_item = 14; -pub const ABDAY_3: ::nl_item = 15; -pub const ABDAY_4: ::nl_item = 16; -pub const ABDAY_5: ::nl_item = 17; -pub const ABDAY_6: ::nl_item = 18; -pub const ABDAY_7: ::nl_item = 19; - -pub const MON_1: ::nl_item = 20; -pub const MON_2: ::nl_item = 21; -pub const MON_3: ::nl_item = 22; -pub const MON_4: ::nl_item = 23; -pub const MON_5: ::nl_item = 24; -pub const MON_6: ::nl_item = 25; -pub const MON_7: ::nl_item = 26; -pub const MON_8: ::nl_item = 27; -pub const MON_9: ::nl_item = 28; -pub const MON_10: ::nl_item = 29; -pub const MON_11: ::nl_item = 30; -pub const MON_12: 
::nl_item = 31; - -pub const ABMON_1: ::nl_item = 32; -pub const ABMON_2: ::nl_item = 33; -pub const ABMON_3: ::nl_item = 34; -pub const ABMON_4: ::nl_item = 35; -pub const ABMON_5: ::nl_item = 36; -pub const ABMON_6: ::nl_item = 37; -pub const ABMON_7: ::nl_item = 38; -pub const ABMON_8: ::nl_item = 39; -pub const ABMON_9: ::nl_item = 40; -pub const ABMON_10: ::nl_item = 41; -pub const ABMON_11: ::nl_item = 42; -pub const ABMON_12: ::nl_item = 43; - -pub const RADIXCHAR: ::nl_item = 44; -pub const THOUSEP: ::nl_item = 45; -pub const YESSTR: ::nl_item = 46; -pub const YESEXPR: ::nl_item = 47; -pub const NOSTR: ::nl_item = 48; -pub const NOEXPR: ::nl_item = 49; -pub const CRNCYSTR: ::nl_item = 50; - -pub const CODESET: ::nl_item = 51; - -pub const EXIT_FAILURE : ::c_int = 1; -pub const EXIT_SUCCESS : ::c_int = 0; -pub const RAND_MAX : ::c_int = 2147483647; -pub const EOF : ::c_int = -1; -pub const SEEK_SET : ::c_int = 0; -pub const SEEK_CUR : ::c_int = 1; -pub const SEEK_END : ::c_int = 2; -pub const _IOFBF : ::c_int = 0; -pub const _IONBF : ::c_int = 2; -pub const _IOLBF : ::c_int = 1; -pub const BUFSIZ : ::c_uint = 1024; -pub const FOPEN_MAX : ::c_uint = 20; -pub const FILENAME_MAX : ::c_uint = 1024; -pub const L_tmpnam : ::c_uint = 1024; -pub const O_RDONLY : ::c_int = 0; -pub const O_WRONLY : ::c_int = 1; -pub const O_RDWR : ::c_int = 2; -pub const O_ACCMODE : ::c_int = 3; -pub const O_APPEND : ::c_int = 8; -pub const O_CREAT : ::c_int = 512; -pub const O_EXCL : ::c_int = 2048; -pub const O_NOCTTY : ::c_int = 32768; -pub const O_TRUNC : ::c_int = 1024; -pub const O_SYNC : ::c_int = 128; -pub const S_IFIFO : mode_t = 4096; -pub const S_IFCHR : mode_t = 8192; -pub const S_IFBLK : mode_t = 24576; -pub const S_IFDIR : mode_t = 16384; -pub const S_IFREG : mode_t = 32768; -pub const S_IFLNK : mode_t = 40960; -pub const S_IFSOCK : mode_t = 49152; -pub const S_IFMT : mode_t = 61440; -pub const S_IEXEC : mode_t = 64; -pub const S_IWRITE : mode_t = 128; -pub const S_IREAD 
: mode_t = 256; -pub const S_IRWXU : mode_t = 448; -pub const S_IXUSR : mode_t = 64; -pub const S_IWUSR : mode_t = 128; -pub const S_IRUSR : mode_t = 256; -pub const S_IRWXG : mode_t = 56; -pub const S_IXGRP : mode_t = 8; -pub const S_IWGRP : mode_t = 16; -pub const S_IRGRP : mode_t = 32; -pub const S_IRWXO : mode_t = 7; -pub const S_IXOTH : mode_t = 1; -pub const S_IWOTH : mode_t = 2; -pub const S_IROTH : mode_t = 4; -pub const F_OK : ::c_int = 0; -pub const R_OK : ::c_int = 4; -pub const W_OK : ::c_int = 2; -pub const X_OK : ::c_int = 1; -pub const STDIN_FILENO : ::c_int = 0; -pub const STDOUT_FILENO : ::c_int = 1; -pub const STDERR_FILENO : ::c_int = 2; -pub const F_LOCK : ::c_int = 1; -pub const F_TEST : ::c_int = 3; -pub const F_TLOCK : ::c_int = 2; -pub const F_ULOCK : ::c_int = 0; -pub const F_GETLK: ::c_int = 7; -pub const F_SETLK: ::c_int = 8; -pub const F_SETLKW: ::c_int = 9; -pub const SIGHUP : ::c_int = 1; -pub const SIGINT : ::c_int = 2; -pub const SIGQUIT : ::c_int = 3; -pub const SIGILL : ::c_int = 4; -pub const SIGABRT : ::c_int = 6; -pub const SIGEMT: ::c_int = 7; -pub const SIGFPE : ::c_int = 8; -pub const SIGKILL : ::c_int = 9; -pub const SIGSEGV : ::c_int = 11; -pub const SIGPIPE : ::c_int = 13; -pub const SIGALRM : ::c_int = 14; -pub const SIGTERM : ::c_int = 15; -pub const SIGSTKSZ : ::size_t = 40960; - -pub const PROT_NONE : ::c_int = 0; -pub const PROT_READ : ::c_int = 1; -pub const PROT_WRITE : ::c_int = 2; -pub const PROT_EXEC : ::c_int = 4; - -pub const MAP_FILE : ::c_int = 0x0000; -pub const MAP_SHARED : ::c_int = 0x0001; -pub const MAP_PRIVATE : ::c_int = 0x0002; -pub const MAP_FIXED : ::c_int = 0x0010; -pub const MAP_ANON : ::c_int = 0x1000; - -pub const MAP_FAILED : *mut ::c_void = !0 as *mut ::c_void; - -pub const MCL_CURRENT : ::c_int = 0x0001; -pub const MCL_FUTURE : ::c_int = 0x0002; - -pub const MS_ASYNC : ::c_int = 0x0001; - -pub const EPERM : ::c_int = 1; -pub const ENOENT : ::c_int = 2; -pub const ESRCH : ::c_int = 3; -pub 
const EINTR : ::c_int = 4; -pub const EIO : ::c_int = 5; -pub const ENXIO : ::c_int = 6; -pub const E2BIG : ::c_int = 7; -pub const ENOEXEC : ::c_int = 8; -pub const EBADF : ::c_int = 9; -pub const ECHILD : ::c_int = 10; -pub const EDEADLK : ::c_int = 11; -pub const ENOMEM : ::c_int = 12; -pub const EACCES : ::c_int = 13; -pub const EFAULT : ::c_int = 14; -pub const ENOTBLK : ::c_int = 15; -pub const EBUSY : ::c_int = 16; -pub const EEXIST : ::c_int = 17; -pub const EXDEV : ::c_int = 18; -pub const ENODEV : ::c_int = 19; -pub const ENOTDIR : ::c_int = 20; -pub const EISDIR : ::c_int = 21; -pub const EINVAL : ::c_int = 22; -pub const ENFILE : ::c_int = 23; -pub const EMFILE : ::c_int = 24; -pub const ENOTTY : ::c_int = 25; -pub const ETXTBSY : ::c_int = 26; -pub const EFBIG : ::c_int = 27; -pub const ENOSPC : ::c_int = 28; -pub const ESPIPE : ::c_int = 29; -pub const EROFS : ::c_int = 30; -pub const EMLINK : ::c_int = 31; -pub const EPIPE : ::c_int = 32; -pub const EDOM : ::c_int = 33; -pub const ERANGE : ::c_int = 34; -pub const EAGAIN : ::c_int = 35; -pub const EWOULDBLOCK : ::c_int = 35; -pub const EINPROGRESS : ::c_int = 36; -pub const EALREADY : ::c_int = 37; -pub const ENOTSOCK : ::c_int = 38; -pub const EDESTADDRREQ : ::c_int = 39; -pub const EMSGSIZE : ::c_int = 40; -pub const EPROTOTYPE : ::c_int = 41; -pub const ENOPROTOOPT : ::c_int = 42; -pub const EPROTONOSUPPORT : ::c_int = 43; -pub const ESOCKTNOSUPPORT : ::c_int = 44; -pub const EOPNOTSUPP : ::c_int = 45; -pub const EPFNOSUPPORT : ::c_int = 46; -pub const EAFNOSUPPORT : ::c_int = 47; -pub const EADDRINUSE : ::c_int = 48; -pub const EADDRNOTAVAIL : ::c_int = 49; -pub const ENETDOWN : ::c_int = 50; -pub const ENETUNREACH : ::c_int = 51; -pub const ENETRESET : ::c_int = 52; -pub const ECONNABORTED : ::c_int = 53; -pub const ECONNRESET : ::c_int = 54; -pub const ENOBUFS : ::c_int = 55; -pub const EISCONN : ::c_int = 56; -pub const ENOTCONN : ::c_int = 57; -pub const ESHUTDOWN : ::c_int = 58; -pub const 
ETOOMANYREFS : ::c_int = 59; -pub const ETIMEDOUT : ::c_int = 60; -pub const ECONNREFUSED : ::c_int = 61; -pub const ELOOP : ::c_int = 62; -pub const ENAMETOOLONG : ::c_int = 63; -pub const EHOSTDOWN : ::c_int = 64; -pub const EHOSTUNREACH : ::c_int = 65; -pub const ENOTEMPTY : ::c_int = 66; -pub const EPROCLIM : ::c_int = 67; -pub const EUSERS : ::c_int = 68; -pub const EDQUOT : ::c_int = 69; -pub const ESTALE : ::c_int = 70; -pub const EREMOTE : ::c_int = 71; -pub const EBADRPC : ::c_int = 72; -pub const ERPCMISMATCH : ::c_int = 73; -pub const EPROGUNAVAIL : ::c_int = 74; -pub const EPROGMISMATCH : ::c_int = 75; -pub const EPROCUNAVAIL : ::c_int = 76; -pub const ENOLCK : ::c_int = 77; -pub const ENOSYS : ::c_int = 78; -pub const EFTYPE : ::c_int = 79; -pub const EAUTH : ::c_int = 80; -pub const ENEEDAUTH : ::c_int = 81; - -pub const F_DUPFD : ::c_int = 0; -pub const F_GETFD : ::c_int = 1; -pub const F_SETFD : ::c_int = 2; -pub const F_GETFL : ::c_int = 3; -pub const F_SETFL : ::c_int = 4; - -pub const SIGTRAP : ::c_int = 5; - -pub const GLOB_APPEND : ::c_int = 0x0001; -pub const GLOB_DOOFFS : ::c_int = 0x0002; -pub const GLOB_ERR : ::c_int = 0x0004; -pub const GLOB_MARK : ::c_int = 0x0008; -pub const GLOB_NOCHECK : ::c_int = 0x0010; -pub const GLOB_NOSORT : ::c_int = 0x0020; -pub const GLOB_NOESCAPE : ::c_int = 0x1000; - -pub const GLOB_NOSPACE : ::c_int = -1; -pub const GLOB_ABORTED : ::c_int = -2; -pub const GLOB_NOMATCH : ::c_int = -3; -pub const GLOB_NOSYS : ::c_int = -4; - -pub const POSIX_MADV_NORMAL : ::c_int = 0; -pub const POSIX_MADV_RANDOM : ::c_int = 1; -pub const POSIX_MADV_SEQUENTIAL : ::c_int = 2; -pub const POSIX_MADV_WILLNEED : ::c_int = 3; -pub const POSIX_MADV_DONTNEED : ::c_int = 4; - -pub const _SC_XOPEN_SHM : ::c_int = 30; - -pub const PTHREAD_CREATE_JOINABLE : ::c_int = 0; -pub const PTHREAD_CREATE_DETACHED : ::c_int = 1; - -// http://man.openbsd.org/OpenBSD-current/man2/clock_getres.2 -// The man page says clock_gettime(3) can accept 
various values as clockid_t but -// http://fxr.watson.org/fxr/source/kern/kern_time.c?v=OPENBSD;im=excerpts#L161 -// the implementation rejects anything other than the below two -// -// http://netbsd.gw.com/cgi-bin/man-cgi?clock_gettime -// https://github.com/jsonn/src/blob/HEAD/sys/kern/subr_time.c#L222 -// Basically the same goes for NetBSD -pub const CLOCK_REALTIME: clockid_t = 0; -pub const CLOCK_MONOTONIC: clockid_t = 3; - -pub const RLIMIT_CPU: ::c_int = 0; -pub const RLIMIT_FSIZE: ::c_int = 1; -pub const RLIMIT_DATA: ::c_int = 2; -pub const RLIMIT_STACK: ::c_int = 3; -pub const RLIMIT_CORE: ::c_int = 4; -pub const RLIMIT_RSS: ::c_int = 5; -pub const RLIMIT_MEMLOCK: ::c_int = 6; -pub const RLIMIT_NPROC: ::c_int = 7; -pub const RLIMIT_NOFILE: ::c_int = 8; - -pub const RLIM_INFINITY: rlim_t = 0x7fff_ffff_ffff_ffff; -pub const RLIM_SAVED_MAX: rlim_t = RLIM_INFINITY; -pub const RLIM_SAVED_CUR: rlim_t = RLIM_INFINITY; - -pub const RUSAGE_SELF: ::c_int = 0; -pub const RUSAGE_CHILDREN: ::c_int = -1; - -pub const MADV_NORMAL : ::c_int = 0; -pub const MADV_RANDOM : ::c_int = 1; -pub const MADV_SEQUENTIAL : ::c_int = 2; -pub const MADV_WILLNEED : ::c_int = 3; -pub const MADV_DONTNEED : ::c_int = 4; -pub const MADV_FREE : ::c_int = 6; - -pub const AF_UNIX: ::c_int = 1; -pub const AF_INET: ::c_int = 2; -pub const AF_INET6: ::c_int = 24; -pub const SOCK_STREAM: ::c_int = 1; -pub const SOCK_DGRAM: ::c_int = 2; -pub const SOCK_RAW: ::c_int = 3; -pub const SOCK_SEQPACKET: ::c_int = 5; -pub const IPPROTO_TCP: ::c_int = 6; -pub const IPPROTO_IP: ::c_int = 0; -pub const IPPROTO_IPV6: ::c_int = 41; -pub const IP_MULTICAST_TTL: ::c_int = 10; -pub const IP_MULTICAST_LOOP: ::c_int = 11; -pub const IP_TTL: ::c_int = 4; -pub const IP_HDRINCL: ::c_int = 2; -pub const IP_ADD_MEMBERSHIP: ::c_int = 12; -pub const IP_DROP_MEMBERSHIP: ::c_int = 13; - -pub const TCP_NODELAY: ::c_int = 0x01; -pub const SOL_SOCKET: ::c_int = 0xffff; -pub const SO_DEBUG: ::c_int = 0x01; -pub const 
SO_ACCEPTCONN: ::c_int = 0x0002; -pub const SO_REUSEADDR: ::c_int = 0x0004; -pub const SO_KEEPALIVE: ::c_int = 0x0008; -pub const SO_DONTROUTE: ::c_int = 0x0010; -pub const SO_BROADCAST: ::c_int = 0x0020; -pub const SO_USELOOPBACK: ::c_int = 0x0040; -pub const SO_LINGER: ::c_int = 0x0080; -pub const SO_OOBINLINE: ::c_int = 0x0100; -pub const SO_REUSEPORT: ::c_int = 0x0200; -pub const SO_SNDBUF: ::c_int = 0x1001; -pub const SO_RCVBUF: ::c_int = 0x1002; -pub const SO_SNDLOWAT: ::c_int = 0x1003; -pub const SO_RCVLOWAT: ::c_int = 0x1004; -pub const SO_ERROR: ::c_int = 0x1007; -pub const SO_TYPE: ::c_int = 0x1008; - -pub const MSG_NOSIGNAL: ::c_int = 0x400; - -pub const IFF_LOOPBACK: ::c_int = 0x8; - -pub const SHUT_RD: ::c_int = 0; -pub const SHUT_WR: ::c_int = 1; -pub const SHUT_RDWR: ::c_int = 2; - -pub const LOCK_SH: ::c_int = 1; -pub const LOCK_EX: ::c_int = 2; -pub const LOCK_NB: ::c_int = 4; -pub const LOCK_UN: ::c_int = 8; - -pub const O_NONBLOCK : ::c_int = 4; - -pub const IPPROTO_RAW : ::c_int = 255; - -pub const _SC_ARG_MAX : ::c_int = 1; -pub const _SC_CHILD_MAX : ::c_int = 2; -pub const _SC_NGROUPS_MAX : ::c_int = 4; -pub const _SC_OPEN_MAX : ::c_int = 5; -pub const _SC_JOB_CONTROL : ::c_int = 6; -pub const _SC_SAVED_IDS : ::c_int = 7; -pub const _SC_VERSION : ::c_int = 8; -pub const _SC_BC_BASE_MAX : ::c_int = 9; -pub const _SC_BC_DIM_MAX : ::c_int = 10; -pub const _SC_BC_SCALE_MAX : ::c_int = 11; -pub const _SC_BC_STRING_MAX : ::c_int = 12; -pub const _SC_COLL_WEIGHTS_MAX : ::c_int = 13; -pub const _SC_EXPR_NEST_MAX : ::c_int = 14; -pub const _SC_LINE_MAX : ::c_int = 15; -pub const _SC_RE_DUP_MAX : ::c_int = 16; -pub const _SC_2_VERSION : ::c_int = 17; -pub const _SC_2_C_BIND : ::c_int = 18; -pub const _SC_2_C_DEV : ::c_int = 19; -pub const _SC_2_CHAR_TERM : ::c_int = 20; -pub const _SC_2_FORT_DEV : ::c_int = 21; -pub const _SC_2_FORT_RUN : ::c_int = 22; -pub const _SC_2_LOCALEDEF : ::c_int = 23; -pub const _SC_2_SW_DEV : ::c_int = 24; -pub const 
_SC_2_UPE : ::c_int = 25; -pub const _SC_STREAM_MAX : ::c_int = 26; -pub const _SC_TZNAME_MAX : ::c_int = 27; -pub const _SC_PAGESIZE : ::c_int = 28; -pub const _SC_PAGE_SIZE: ::c_int = _SC_PAGESIZE; -pub const _SC_FSYNC : ::c_int = 29; - -pub const Q_GETQUOTA: ::c_int = 0x300; -pub const Q_SETQUOTA: ::c_int = 0x400; - -pub const RTLD_GLOBAL: ::c_int = 0x100; - -pub const LOG_NFACILITIES: ::c_int = 24; - -pub const HW_NCPU: ::c_int = 3; - -pub const SEM_FAILED: *mut sem_t = 0 as *mut sem_t; - -f! { - pub fn WSTOPSIG(status: ::c_int) -> ::c_int { - status >> 8 - } - - pub fn WIFSIGNALED(status: ::c_int) -> bool { - (status & 0o177) != 0o177 && (status & 0o177) != 0 - } - - pub fn WIFSTOPPED(status: ::c_int) -> bool { - (status & 0o177) == 0o177 - } -} - -#[link(name = "util")] -extern { - pub fn mincore(addr: *mut ::c_void, len: ::size_t, - vec: *mut ::c_char) -> ::c_int; - #[cfg_attr(target_os = "netbsd", link_name = "__clock_getres50")] - pub fn clock_getres(clk_id: clockid_t, tp: *mut ::timespec) -> ::c_int; - #[cfg_attr(target_os = "netbsd", link_name = "__clock_gettime50")] - pub fn clock_gettime(clk_id: clockid_t, tp: *mut ::timespec) -> ::c_int; - pub fn __errno() -> *mut ::c_int; - pub fn shm_open(name: *const ::c_char, oflag: ::c_int, mode: ::mode_t) - -> ::c_int; - pub fn memrchr(cx: *const ::c_void, - c: ::c_int, - n: ::size_t) -> *mut ::c_void; - pub fn mkostemp(template: *mut ::c_char, flags: ::c_int) -> ::c_int; - pub fn mkostemps(template: *mut ::c_char, - suffixlen: ::c_int, - flags: ::c_int) -> ::c_int; - pub fn futimens(fd: ::c_int, times: *const ::timespec) -> ::c_int; - pub fn fdatasync(fd: ::c_int) -> ::c_int; - pub fn openpty(amaster: *mut ::c_int, - aslave: *mut ::c_int, - name: *mut ::c_char, - termp: *mut termios, - winp: *mut ::winsize) -> ::c_int; - pub fn forkpty(amaster: *mut ::c_int, - name: *mut ::c_char, - termp: *mut termios, - winp: *mut ::winsize) -> ::pid_t; - pub fn getpriority(which: ::c_int, who: ::id_t) -> ::c_int; - pub fn 
setpriority(which: ::c_int, who: ::id_t, prio: ::c_int) -> ::c_int; - - pub fn openat(dirfd: ::c_int, pathname: *const ::c_char, - flags: ::c_int, ...) -> ::c_int; - pub fn faccessat(dirfd: ::c_int, pathname: *const ::c_char, - mode: ::c_int, flags: ::c_int) -> ::c_int; - pub fn fchmodat(dirfd: ::c_int, pathname: *const ::c_char, - mode: ::mode_t, flags: ::c_int) -> ::c_int; - pub fn fchownat(dirfd: ::c_int, pathname: *const ::c_char, - owner: ::uid_t, group: ::gid_t, - flags: ::c_int) -> ::c_int; - pub fn fstatat(dirfd: ::c_int, pathname: *const ::c_char, - buf: *mut stat, flags: ::c_int) -> ::c_int; - pub fn linkat(olddirfd: ::c_int, oldpath: *const ::c_char, - newdirfd: ::c_int, newpath: *const ::c_char, - flags: ::c_int) -> ::c_int; - pub fn mkdirat(dirfd: ::c_int, pathname: *const ::c_char, - mode: ::mode_t) -> ::c_int; - pub fn mknodat(dirfd: ::c_int, pathname: *const ::c_char, - mode: ::mode_t, dev: dev_t) -> ::c_int; - pub fn readlinkat(dirfd: ::c_int, pathname: *const ::c_char, - buf: *mut ::c_char, bufsiz: ::size_t) -> ::ssize_t; - pub fn renameat(olddirfd: ::c_int, oldpath: *const ::c_char, - newdirfd: ::c_int, newpath: *const ::c_char) - -> ::c_int; - pub fn symlinkat(target: *const ::c_char, newdirfd: ::c_int, - linkpath: *const ::c_char) -> ::c_int; - pub fn unlinkat(dirfd: ::c_int, pathname: *const ::c_char, - flags: ::c_int) -> ::c_int; - pub fn mkfifoat(dirfd: ::c_int, pathname: *const ::c_char, - mode: ::mode_t) -> ::c_int; - pub fn sem_timedwait(sem: *mut sem_t, - abstime: *const ::timespec) -> ::c_int; - pub fn pthread_condattr_setclock(attr: *mut pthread_condattr_t, - clock_id: clockid_t) -> ::c_int; - pub fn sethostname(name: *const ::c_char, len: ::size_t) -> ::c_int; - pub fn pthread_mutex_timedlock(lock: *mut pthread_mutex_t, - abstime: *const ::timespec) -> ::c_int; -} - -cfg_if! 
{ - if #[cfg(target_os = "netbsd")] { - mod netbsd; - pub use self::netbsd::*; - } else if #[cfg(any(target_os = "openbsd", target_os = "bitrig"))] { - mod openbsdlike; - pub use self::openbsdlike::*; - } else { - // Unknown target_os - } -} diff -Nru cargo-0.17.0/vendor/libc-0.2.18/src/unix/bsd/netbsdlike/netbsd/mod.rs cargo-0.19.0/vendor/libc-0.2.18/src/unix/bsd/netbsdlike/netbsd/mod.rs --- cargo-0.17.0/vendor/libc-0.2.18/src/unix/bsd/netbsdlike/netbsd/mod.rs 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/libc-0.2.18/src/unix/bsd/netbsdlike/netbsd/mod.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,664 +0,0 @@ -pub type clock_t = ::c_uint; -pub type suseconds_t = ::c_int; -pub type dev_t = u64; -pub type blksize_t = ::int32_t; -pub type fsblkcnt_t = ::uint64_t; -pub type fsfilcnt_t = ::uint64_t; - -s! { - pub struct aiocb { - pub aio_offset: ::off_t, - pub aio_buf: *mut ::c_void, - pub aio_nbytes: ::size_t, - pub aio_fildes: ::c_int, - pub aio_lio_opcode: ::c_int, - pub aio_reqprio: ::c_int, - pub aio_sigevent: ::sigevent, - _state: ::c_int, - _errno: ::c_int, - _retval: ::ssize_t - } - - pub struct dirent { - pub d_fileno: ::ino_t, - pub d_reclen: u16, - pub d_namlen: u16, - pub d_type: u8, - pub d_name: [::c_char; 512], - } - - pub struct glob_t { - pub gl_pathc: ::size_t, - pub gl_matchc: ::size_t, - pub gl_offs: ::size_t, - pub gl_flags: ::c_int, - pub gl_pathv: *mut *mut ::c_char, - - __unused3: *mut ::c_void, - - __unused4: *mut ::c_void, - __unused5: *mut ::c_void, - __unused6: *mut ::c_void, - __unused7: *mut ::c_void, - __unused8: *mut ::c_void, - } - - pub struct sigevent { - pub sigev_notify: ::c_int, - pub sigev_signo: ::c_int, - pub sigev_value: ::sigval, - __unused1: *mut ::c_void, //actually a function pointer - pub sigev_notify_attributes: *mut ::c_void - } - - pub struct sigset_t { - __bits: [u32; 4], - } - - pub struct stat { - pub st_dev: ::dev_t, - pub st_mode: ::mode_t, - pub st_ino: ::ino_t, - pub st_nlink: ::nlink_t, - pub st_uid: 
::uid_t, - pub st_gid: ::gid_t, - pub st_rdev: ::dev_t, - pub st_atime: ::time_t, - pub st_atimensec: ::c_long, - pub st_mtime: ::time_t, - pub st_mtimensec: ::c_long, - pub st_ctime: ::time_t, - pub st_ctimensec: ::c_long, - pub st_birthtime: ::time_t, - pub st_birthtimensec: ::c_long, - pub st_size: ::off_t, - pub st_blocks: ::blkcnt_t, - pub st_blksize: ::blksize_t, - pub st_flags: ::uint32_t, - pub st_gen: ::uint32_t, - pub st_spare: [::uint32_t; 2], - } - - pub struct statvfs { - pub f_flag: ::c_ulong, - pub f_bsize: ::c_ulong, - pub f_frsize: ::c_ulong, - pub f_iosize: ::c_ulong, - - pub f_blocks: ::fsblkcnt_t, - pub f_bfree: ::fsblkcnt_t, - pub f_bavail: ::fsblkcnt_t, - pub f_bresvd: ::fsblkcnt_t, - - pub f_files: ::fsfilcnt_t, - pub f_ffree: ::fsfilcnt_t, - pub f_favail: ::fsfilcnt_t, - pub f_fresvd: ::fsfilcnt_t, - - pub f_syncreads: ::uint64_t, - pub f_syncwrites: ::uint64_t, - - pub f_asyncreads: ::uint64_t, - pub f_asyncwrites: ::uint64_t, - - pub f_fsidx: ::fsid_t, - pub f_fsid: ::c_ulong, - pub f_namemax: ::c_ulong, - pub f_owner: ::uid_t, - - pub f_spare: [::uint32_t; 4], - - pub f_fstypename: [::c_char; 32], - pub f_mntonname: [::c_char; 1024], - pub f_mntfromname: [::c_char; 1024], - } - - pub struct addrinfo { - pub ai_flags: ::c_int, - pub ai_family: ::c_int, - pub ai_socktype: ::c_int, - pub ai_protocol: ::c_int, - pub ai_addrlen: ::socklen_t, - pub ai_canonname: *mut ::c_char, - pub ai_addr: *mut ::sockaddr, - pub ai_next: *mut ::addrinfo, - } - - pub struct sockaddr_storage { - pub ss_len: u8, - pub ss_family: ::sa_family_t, - __ss_pad1: [u8; 6], - __ss_pad2: i64, - __ss_pad3: [u8; 112], - } - - pub struct siginfo_t { - pub si_signo: ::c_int, - pub si_code: ::c_int, - pub si_errno: ::c_int, - __pad1: ::c_int, - pub si_addr: *mut ::c_void, - __pad2: [u64; 13], - } - - pub struct pthread_attr_t { - pta_magic: ::c_uint, - pta_flags: ::c_int, - pta_private: *mut ::c_void, - } - - pub struct pthread_mutex_t { - ptm_magic: ::c_uint, - 
ptm_errorcheck: ::c_uchar, - ptm_pad1: [u8; 3], - ptm_interlock: ::c_uchar, - ptm_pad2: [u8; 3], - ptm_owner: ::pthread_t, - ptm_waiters: *mut u8, - ptm_recursed: ::c_uint, - ptm_spare2: *mut ::c_void, - } - - pub struct pthread_mutexattr_t { - ptma_magic: ::c_uint, - ptma_private: *mut ::c_void, - } - - pub struct pthread_cond_t { - ptc_magic: ::c_uint, - ptc_lock: ::c_uchar, - ptc_waiters_first: *mut u8, - ptc_waiters_last: *mut u8, - ptc_mutex: *mut ::pthread_mutex_t, - ptc_private: *mut ::c_void, - } - - pub struct pthread_condattr_t { - ptca_magic: ::c_uint, - ptca_private: *mut ::c_void, - } - - pub struct pthread_rwlock_t { - ptr_magic: ::c_uint, - ptr_interlock: ::c_uchar, - ptr_rblocked_first: *mut u8, - ptr_rblocked_last: *mut u8, - ptr_wblocked_first: *mut u8, - ptr_wblocked_last: *mut u8, - ptr_nreaders: ::c_uint, - ptr_owner: ::pthread_t, - ptr_private: *mut ::c_void, - } - - pub struct kevent { - pub ident: ::uintptr_t, - pub filter: ::uint32_t, - pub flags: ::uint32_t, - pub fflags: ::uint32_t, - pub data: ::int64_t, - pub udata: ::intptr_t, - } - - pub struct dqblk { - pub dqb_bhardlimit: ::uint32_t, - pub dqb_bsoftlimit: ::uint32_t, - pub dqb_curblocks: ::uint32_t, - pub dqb_ihardlimit: ::uint32_t, - pub dqb_isoftlimit: ::uint32_t, - pub dqb_curinodes: ::uint32_t, - pub dqb_btime: ::int32_t, - pub dqb_itime: ::int32_t, - } - - pub struct Dl_info { - pub dli_fname: *const ::c_char, - pub dli_fbase: *mut ::c_void, - pub dli_sname: *const ::c_char, - pub dli_saddr: *const ::c_void, - } - - pub struct lconv { - pub decimal_point: *mut ::c_char, - pub thousands_sep: *mut ::c_char, - pub grouping: *mut ::c_char, - pub int_curr_symbol: *mut ::c_char, - pub currency_symbol: *mut ::c_char, - pub mon_decimal_point: *mut ::c_char, - pub mon_thousands_sep: *mut ::c_char, - pub mon_grouping: *mut ::c_char, - pub positive_sign: *mut ::c_char, - pub negative_sign: *mut ::c_char, - pub int_frac_digits: ::c_char, - pub frac_digits: ::c_char, - pub p_cs_precedes: 
::c_char, - pub p_sep_by_space: ::c_char, - pub n_cs_precedes: ::c_char, - pub n_sep_by_space: ::c_char, - pub p_sign_posn: ::c_char, - pub n_sign_posn: ::c_char, - pub int_p_cs_precedes: ::c_char, - pub int_n_cs_precedes: ::c_char, - pub int_p_sep_by_space: ::c_char, - pub int_n_sep_by_space: ::c_char, - pub int_p_sign_posn: ::c_char, - pub int_n_sign_posn: ::c_char, - } -} - -pub const LC_COLLATE_MASK: ::c_int = (1 << ::LC_COLLATE); -pub const LC_CTYPE_MASK: ::c_int = (1 << ::LC_CTYPE); -pub const LC_MONETARY_MASK: ::c_int = (1 << ::LC_MONETARY); -pub const LC_NUMERIC_MASK: ::c_int = (1 << ::LC_NUMERIC); -pub const LC_TIME_MASK: ::c_int = (1 << ::LC_TIME); -pub const LC_MESSAGES_MASK: ::c_int = (1 << ::LC_MESSAGES); -pub const LC_ALL_MASK: ::c_int = !0; - -pub const ERA: ::nl_item = 52; -pub const ERA_D_FMT: ::nl_item = 53; -pub const ERA_D_T_FMT: ::nl_item = 54; -pub const ERA_T_FMT: ::nl_item = 55; -pub const ALT_DIGITS: ::nl_item = 56; - -pub const O_CLOEXEC: ::c_int = 0x400000; -pub const O_ALT_IO: ::c_int = 0x40000; -pub const O_NOSIGPIPE: ::c_int = 0x1000000; -pub const O_SEARCH: ::c_int = 0x800000; -pub const O_EXLOCK: ::c_int = 0x20; -pub const O_SHLOCK: ::c_int = 0x10; -pub const O_DIRECTORY: ::c_int = 0x200000; - -pub const MS_SYNC : ::c_int = 0x4; -pub const MS_INVALIDATE : ::c_int = 0x2; - -pub const RLIM_NLIMITS: ::c_int = 12; - -pub const ENOATTR : ::c_int = 93; -pub const EILSEQ : ::c_int = 85; -pub const EOVERFLOW : ::c_int = 84; -pub const ECANCELED : ::c_int = 87; -pub const EIDRM : ::c_int = 82; -pub const ENOMSG : ::c_int = 83; -pub const ENOTSUP : ::c_int = 86; -pub const ELAST : ::c_int = 96; - -pub const F_DUPFD_CLOEXEC : ::c_int = 12; -pub const F_CLOSEM: ::c_int = 10; -pub const F_GETNOSIGPIPE: ::c_int = 13; -pub const F_SETNOSIGPIPE: ::c_int = 14; -pub const F_MAXFD: ::c_int = 11; - -pub const IPV6_JOIN_GROUP: ::c_int = 12; -pub const IPV6_LEAVE_GROUP: ::c_int = 13; - -pub const SO_SNDTIMEO: ::c_int = 0x100b; -pub const SO_RCVTIMEO: 
::c_int = 0x100c; - -pub const O_DSYNC : ::c_int = 0x10000; - -pub const MAP_RENAME : ::c_int = 0x20; -pub const MAP_NORESERVE : ::c_int = 0x40; -pub const MAP_HASSEMAPHORE : ::c_int = 0x200; -pub const MAP_WIRED: ::c_int = 0x800; - -pub const _SC_IOV_MAX : ::c_int = 32; -pub const _SC_GETGR_R_SIZE_MAX : ::c_int = 47; -pub const _SC_GETPW_R_SIZE_MAX : ::c_int = 48; -pub const _SC_LOGIN_NAME_MAX : ::c_int = 37; -pub const _SC_MQ_PRIO_MAX : ::c_int = 55; -pub const _SC_NPROCESSORS_ONLN : ::c_int = 1002; -pub const _SC_THREADS : ::c_int = 41; -pub const _SC_THREAD_ATTR_STACKADDR : ::c_int = 61; -pub const _SC_THREAD_ATTR_STACKSIZE : ::c_int = 62; -pub const _SC_THREAD_DESTRUCTOR_ITERATIONS : ::c_int = 57; -pub const _SC_THREAD_KEYS_MAX : ::c_int = 58; -pub const _SC_THREAD_PRIO_INHERIT : ::c_int = 64; -pub const _SC_THREAD_PRIO_PROTECT : ::c_int = 65; -pub const _SC_THREAD_PRIORITY_SCHEDULING : ::c_int = 63; -pub const _SC_THREAD_PROCESS_SHARED : ::c_int = 66; -pub const _SC_THREAD_SAFE_FUNCTIONS : ::c_int = 67; -pub const _SC_THREAD_STACK_MIN : ::c_int = 59; -pub const _SC_THREAD_THREADS_MAX : ::c_int = 60; -pub const _SC_TTY_NAME_MAX : ::c_int = 68; -pub const _SC_ATEXIT_MAX : ::c_int = 40; -pub const _SC_CLK_TCK : ::c_int = 39; -pub const _SC_AIO_LISTIO_MAX : ::c_int = 51; -pub const _SC_AIO_MAX : ::c_int = 52; -pub const _SC_ASYNCHRONOUS_IO : ::c_int = 50; -pub const _SC_MAPPED_FILES : ::c_int = 33; -pub const _SC_MEMLOCK : ::c_int = 34; -pub const _SC_MEMLOCK_RANGE : ::c_int = 35; -pub const _SC_MEMORY_PROTECTION : ::c_int = 36; -pub const _SC_MESSAGE_PASSING : ::c_int = 53; -pub const _SC_MQ_OPEN_MAX : ::c_int = 54; -pub const _SC_PRIORITY_SCHEDULING : ::c_int = 56; -pub const _SC_SEMAPHORES : ::c_int = 42; -pub const _SC_SHARED_MEMORY_OBJECTS : ::c_int = 87; -pub const _SC_SYNCHRONIZED_IO : ::c_int = 31; -pub const _SC_TIMERS : ::c_int = 44; -pub const _SC_HOST_NAME_MAX : ::c_int = 69; - -pub const FD_SETSIZE: usize = 0x100; - -pub const ST_NOSUID: ::c_ulong = 
8; - -pub const PTHREAD_MUTEX_INITIALIZER: pthread_mutex_t = pthread_mutex_t { - ptm_magic: 0x33330003, - ptm_errorcheck: 0, - ptm_interlock: 0, - ptm_waiters: 0 as *mut _, - ptm_owner: 0, - ptm_pad1: [0; 3], - ptm_pad2: [0; 3], - ptm_recursed: 0, - ptm_spare2: 0 as *mut _, -}; -pub const PTHREAD_COND_INITIALIZER: pthread_cond_t = pthread_cond_t { - ptc_magic: 0x55550005, - ptc_lock: 0, - ptc_waiters_first: 0 as *mut _, - ptc_waiters_last: 0 as *mut _, - ptc_mutex: 0 as *mut _, - ptc_private: 0 as *mut _, -}; -pub const PTHREAD_RWLOCK_INITIALIZER: pthread_rwlock_t = pthread_rwlock_t { - ptr_magic: 0x99990009, - ptr_interlock: 0, - ptr_rblocked_first: 0 as *mut _, - ptr_rblocked_last: 0 as *mut _, - ptr_wblocked_first: 0 as *mut _, - ptr_wblocked_last: 0 as *mut _, - ptr_nreaders: 0, - ptr_owner: 0, - ptr_private: 0 as *mut _, -}; -pub const PTHREAD_MUTEX_NORMAL: ::c_int = 0; -pub const PTHREAD_MUTEX_ERRORCHECK: ::c_int = 1; -pub const PTHREAD_MUTEX_RECURSIVE: ::c_int = 2; -pub const PTHREAD_MUTEX_DEFAULT: ::c_int = PTHREAD_MUTEX_NORMAL; - -pub const EVFILT_AIO: ::int32_t = 2; -pub const EVFILT_PROC: ::int32_t = 4; -pub const EVFILT_READ: ::int32_t = 0; -pub const EVFILT_SIGNAL: ::int32_t = 5; -pub const EVFILT_TIMER: ::int32_t = 6; -pub const EVFILT_VNODE: ::int32_t = 3; -pub const EVFILT_WRITE: ::int32_t = 1; - -pub const EV_ADD: ::uint32_t = 0x1; -pub const EV_DELETE: ::uint32_t = 0x2; -pub const EV_ENABLE: ::uint32_t = 0x4; -pub const EV_DISABLE: ::uint32_t = 0x8; -pub const EV_ONESHOT: ::uint32_t = 0x10; -pub const EV_CLEAR: ::uint32_t = 0x20; -pub const EV_RECEIPT: ::uint32_t = 0x40; -pub const EV_DISPATCH: ::uint32_t = 0x80; -pub const EV_FLAG1: ::uint32_t = 0x2000; -pub const EV_ERROR: ::uint32_t = 0x4000; -pub const EV_EOF: ::uint32_t = 0x8000; -pub const EV_SYSFLAGS: ::uint32_t = 0xf000; - -pub const NOTE_LOWAT: ::uint32_t = 0x00000001; -pub const NOTE_DELETE: ::uint32_t = 0x00000001; -pub const NOTE_WRITE: ::uint32_t = 0x00000002; -pub const NOTE_EXTEND: 
::uint32_t = 0x00000004; -pub const NOTE_ATTRIB: ::uint32_t = 0x00000008; -pub const NOTE_LINK: ::uint32_t = 0x00000010; -pub const NOTE_RENAME: ::uint32_t = 0x00000020; -pub const NOTE_REVOKE: ::uint32_t = 0x00000040; -pub const NOTE_EXIT: ::uint32_t = 0x80000000; -pub const NOTE_FORK: ::uint32_t = 0x40000000; -pub const NOTE_EXEC: ::uint32_t = 0x20000000; -pub const NOTE_PDATAMASK: ::uint32_t = 0x000fffff; -pub const NOTE_PCTRLMASK: ::uint32_t = 0xf0000000; -pub const NOTE_TRACK: ::uint32_t = 0x00000001; -pub const NOTE_TRACKERR: ::uint32_t = 0x00000002; -pub const NOTE_CHILD: ::uint32_t = 0x00000004; - -pub const CRTSCTS: ::tcflag_t = 0x00010000; - -pub const TMP_MAX : ::c_uint = 308915776; - -pub const NI_MAXHOST: ::socklen_t = 1025; - -pub const RTLD_NOLOAD: ::c_int = 0x2000; -pub const RTLD_LOCAL: ::c_int = 0x200; - -pub const CTL_MAXNAME: ::c_int = 12; -pub const SYSCTL_NAMELEN: ::c_int = 32; -pub const SYSCTL_DEFSIZE: ::c_int = 8; -pub const CTLTYPE_NODE: ::c_int = 1; -pub const CTLTYPE_INT: ::c_int = 2; -pub const CTLTYPE_STRING: ::c_int = 3; -pub const CTLTYPE_QUAD: ::c_int = 4; -pub const CTLTYPE_STRUCT: ::c_int = 5; -pub const CTLTYPE_BOOL: ::c_int = 6; -pub const CTLFLAG_READONLY: ::c_int = 0x00000000; -pub const CTLFLAG_READWRITE: ::c_int = 0x00000070; -pub const CTLFLAG_ANYWRITE: ::c_int = 0x00000080; -pub const CTLFLAG_PRIVATE: ::c_int = 0x00000100; -pub const CTLFLAG_PERMANENT: ::c_int = 0x00000200; -pub const CTLFLAG_OWNDATA: ::c_int = 0x00000400; -pub const CTLFLAG_IMMEDIATE: ::c_int = 0x00000800; -pub const CTLFLAG_HEX: ::c_int = 0x00001000; -pub const CTLFLAG_ROOT: ::c_int = 0x00002000; -pub const CTLFLAG_ANYNUMBER: ::c_int = 0x00004000; -pub const CTLFLAG_HIDDEN: ::c_int = 0x00008000; -pub const CTLFLAG_ALIAS: ::c_int = 0x00010000; -pub const CTLFLAG_MMAP: ::c_int = 0x00020000; -pub const CTLFLAG_OWNDESC: ::c_int = 0x00040000; -pub const CTLFLAG_UNSIGNED: ::c_int = 0x00080000; -pub const SYSCTL_VERS_MASK: ::c_int = 0xff000000; -pub const 
SYSCTL_VERS_0: ::c_int = 0x00000000; -pub const SYSCTL_VERS_1: ::c_int = 0x01000000; -pub const SYSCTL_VERSION: ::c_int = SYSCTL_VERS_1; -pub const CTL_EOL: ::c_int = -1; -pub const CTL_QUERY: ::c_int = -2; -pub const CTL_CREATE: ::c_int = -3; -pub const CTL_CREATESYM: ::c_int = -4; -pub const CTL_DESTROY: ::c_int = -5; -pub const CTL_MMAP: ::c_int = -6; -pub const CTL_DESCRIBE: ::c_int = -7; -pub const CTL_UNSPEC: ::c_int = 0; -pub const CTL_KERN: ::c_int = 1; -pub const CTL_VM: ::c_int = 2; -pub const CTL_VFS: ::c_int = 3; -pub const CTL_NET: ::c_int = 4; -pub const CTL_DEBUG: ::c_int = 5; -pub const CTL_HW: ::c_int = 6; -pub const CTL_MACHDEP: ::c_int = 7; -pub const CTL_USER: ::c_int = 8; -pub const CTL_DDB: ::c_int = 9; -pub const CTL_PROC: ::c_int = 10; -pub const CTL_VENDOR: ::c_int = 11; -pub const CTL_EMUL: ::c_int = 12; -pub const CTL_SECURITY: ::c_int = 13; -pub const CTL_MAXID: ::c_int = 14; -pub const KERN_OSTYPE: ::c_int = 1; -pub const KERN_OSRELEASE: ::c_int = 2; -pub const KERN_OSREV: ::c_int = 3; -pub const KERN_VERSION: ::c_int = 4; -pub const KERN_MAXVNODES: ::c_int = 5; -pub const KERN_MAXPROC: ::c_int = 6; -pub const KERN_MAXFILES: ::c_int = 7; -pub const KERN_ARGMAX: ::c_int = 8; -pub const KERN_SECURELVL: ::c_int = 9; -pub const KERN_HOSTNAME: ::c_int = 10; -pub const KERN_HOSTID: ::c_int = 11; -pub const KERN_CLOCKRATE: ::c_int = 12; -pub const KERN_VNODE: ::c_int = 13; -pub const KERN_PROC: ::c_int = 14; -pub const KERN_FILE: ::c_int = 15; -pub const KERN_PROF: ::c_int = 16; -pub const KERN_POSIX1: ::c_int = 17; -pub const KERN_NGROUPS: ::c_int = 18; -pub const KERN_JOB_CONTROL: ::c_int = 19; -pub const KERN_SAVED_IDS: ::c_int = 20; -pub const KERN_OBOOTTIME: ::c_int = 21; -pub const KERN_DOMAINNAME: ::c_int = 22; -pub const KERN_MAXPARTITIONS: ::c_int = 23; -pub const KERN_RAWPARTITION: ::c_int = 24; -pub const KERN_NTPTIME: ::c_int = 25; -pub const KERN_TIMEX: ::c_int = 26; -pub const KERN_AUTONICETIME: ::c_int = 27; -pub const 
KERN_AUTONICEVAL: ::c_int = 28; -pub const KERN_RTC_OFFSET: ::c_int = 29; -pub const KERN_ROOT_DEVICE: ::c_int = 30; -pub const KERN_MSGBUFSIZE: ::c_int = 31; -pub const KERN_FSYNC: ::c_int = 32; -pub const KERN_OLDSYSVMSG: ::c_int = 33; -pub const KERN_OLDSYSVSEM: ::c_int = 34; -pub const KERN_OLDSYSVSHM: ::c_int = 35; -pub const KERN_OLDSHORTCORENAME: ::c_int = 36; -pub const KERN_SYNCHRONIZED_IO: ::c_int = 37; -pub const KERN_IOV_MAX: ::c_int = 38; -pub const KERN_MBUF: ::c_int = 39; -pub const KERN_MAPPED_FILES: ::c_int = 40; -pub const KERN_MEMLOCK: ::c_int = 41; -pub const KERN_MEMLOCK_RANGE: ::c_int = 42; -pub const KERN_MEMORY_PROTECTION: ::c_int = 43; -pub const KERN_LOGIN_NAME_MAX: ::c_int = 44; -pub const KERN_DEFCORENAME: ::c_int = 45; -pub const KERN_LOGSIGEXIT: ::c_int = 46; -pub const KERN_PROC2: ::c_int = 47; -pub const KERN_PROC_ARGS: ::c_int = 48; -pub const KERN_FSCALE: ::c_int = 49; -pub const KERN_CCPU: ::c_int = 50; -pub const KERN_CP_TIME: ::c_int = 51; -pub const KERN_OLDSYSVIPC_INFO: ::c_int = 52; -pub const KERN_MSGBUF: ::c_int = 53; -pub const KERN_CONSDEV: ::c_int = 54; -pub const KERN_MAXPTYS: ::c_int = 55; -pub const KERN_PIPE: ::c_int = 56; -pub const KERN_MAXPHYS: ::c_int = 57; -pub const KERN_SBMAX: ::c_int = 58; -pub const KERN_TKSTAT: ::c_int = 59; -pub const KERN_MONOTONIC_CLOCK: ::c_int = 60; -pub const KERN_URND: ::c_int = 61; -pub const KERN_LABELSECTOR: ::c_int = 62; -pub const KERN_LABELOFFSET: ::c_int = 63; -pub const KERN_LWP: ::c_int = 64; -pub const KERN_FORKFSLEEP: ::c_int = 65; -pub const KERN_POSIX_THREADS: ::c_int = 66; -pub const KERN_POSIX_SEMAPHORES: ::c_int = 67; -pub const KERN_POSIX_BARRIERS: ::c_int = 68; -pub const KERN_POSIX_TIMERS: ::c_int = 69; -pub const KERN_POSIX_SPIN_LOCKS: ::c_int = 70; -pub const KERN_POSIX_READER_WRITER_LOCKS: ::c_int = 71; -pub const KERN_DUMP_ON_PANIC: ::c_int = 72; -pub const KERN_SOMAXKVA: ::c_int = 73; -pub const KERN_ROOT_PARTITION: ::c_int = 74; -pub const KERN_DRIVERS: 
::c_int = 75; -pub const KERN_BUF: ::c_int = 76; -pub const KERN_FILE2: ::c_int = 77; -pub const KERN_VERIEXEC: ::c_int = 78; -pub const KERN_CP_ID: ::c_int = 79; -pub const KERN_HARDCLOCK_TICKS: ::c_int = 80; -pub const KERN_ARND: ::c_int = 81; -pub const KERN_SYSVIPC: ::c_int = 82; -pub const KERN_BOOTTIME: ::c_int = 83; -pub const KERN_EVCNT: ::c_int = 84; -pub const KERN_MAXID: ::c_int = 85; -pub const KERN_PROC_ALL: ::c_int = 0; -pub const KERN_PROC_PID: ::c_int = 1; -pub const KERN_PROC_PGRP: ::c_int = 2; -pub const KERN_PROC_SESSION: ::c_int = 3; -pub const KERN_PROC_TTY: ::c_int = 4; -pub const KERN_PROC_UID: ::c_int = 5; -pub const KERN_PROC_RUID: ::c_int = 6; -pub const KERN_PROC_GID: ::c_int = 7; -pub const KERN_PROC_RGID: ::c_int = 8; - -pub const EAI_SYSTEM: ::c_int = 11; - -pub const AIO_CANCELED: ::c_int = 1; -pub const AIO_NOTCANCELED: ::c_int = 2; -pub const AIO_ALLDONE: ::c_int = 3; -pub const LIO_NOP: ::c_int = 0; -pub const LIO_WRITE: ::c_int = 1; -pub const LIO_READ: ::c_int = 2; -pub const LIO_WAIT: ::c_int = 1; -pub const LIO_NOWAIT: ::c_int = 0; - -pub const SIGEV_NONE: ::c_int = 0; -pub const SIGEV_SIGNAL: ::c_int = 1; -pub const SIGEV_THREAD: ::c_int = 2; - -extern { - pub fn aio_read(aiocbp: *mut aiocb) -> ::c_int; - pub fn aio_write(aiocbp: *mut aiocb) -> ::c_int; - pub fn aio_fsync(op: ::c_int, aiocbp: *mut aiocb) -> ::c_int; - pub fn aio_error(aiocbp: *const aiocb) -> ::c_int; - pub fn aio_return(aiocbp: *mut aiocb) -> ::ssize_t; - #[link_name = "__aio_suspend50"] - pub fn aio_suspend(aiocb_list: *const *const aiocb, nitems: ::c_int, - timeout: *const ::timespec) -> ::c_int; - pub fn aio_cancel(fd: ::c_int, aiocbp: *mut aiocb) -> ::c_int; - pub fn lio_listio(mode: ::c_int, aiocb_list: *const *mut aiocb, - nitems: ::c_int, sevp: *mut sigevent) -> ::c_int; - - pub fn lutimes(file: *const ::c_char, times: *const ::timeval) -> ::c_int; - pub fn getnameinfo(sa: *const ::sockaddr, - salen: ::socklen_t, - host: *mut ::c_char, - hostlen: 
::socklen_t, - serv: *mut ::c_char, - sevlen: ::socklen_t, - flags: ::c_int) -> ::c_int; - pub fn mprotect(addr: *mut ::c_void, len: ::size_t, prot: ::c_int) - -> ::c_int; - pub fn sysctl(name: *const ::c_int, - namelen: ::c_uint, - oldp: *mut ::c_void, - oldlenp: *mut ::size_t, - newp: *const ::c_void, - newlen: ::size_t) - -> ::c_int; - pub fn sysctlbyname(name: *const ::c_char, - oldp: *mut ::c_void, - oldlenp: *mut ::size_t, - newp: *const ::c_void, - newlen: ::size_t) - -> ::c_int; - #[link_name = "__kevent50"] - pub fn kevent(kq: ::c_int, - changelist: *const ::kevent, - nchanges: ::size_t, - eventlist: *mut ::kevent, - nevents: ::size_t, - timeout: *const ::timespec) -> ::c_int; - #[link_name = "__mount50"] - pub fn mount(src: *const ::c_char, - target: *const ::c_char, - flags: ::c_int, - data: *mut ::c_void, - size: ::size_t) -> ::c_int; - pub fn ptrace(requeset: ::c_int, - pid: ::pid_t, - addr: *mut ::c_void, - data: ::c_int) -> ::c_int; - pub fn pthread_setname_np(t: ::pthread_t, - name: *const ::c_char, - arg: *mut ::c_void) -> ::c_int; - pub fn pthread_getattr_np(native: ::pthread_t, - attr: *mut ::pthread_attr_t) -> ::c_int; - pub fn pthread_attr_getguardsize(attr: *const ::pthread_attr_t, - guardsize: *mut ::size_t) -> ::c_int; - pub fn pthread_attr_getstack(attr: *const ::pthread_attr_t, - stackaddr: *mut *mut ::c_void, - stacksize: *mut ::size_t) -> ::c_int; - #[link_name = "__sigtimedwait50"] - pub fn sigtimedwait(set: *const sigset_t, - info: *mut siginfo_t, - timeout: *const ::timespec) -> ::c_int; - pub fn sigwaitinfo(set: *const sigset_t, - info: *mut siginfo_t) -> ::c_int; - pub fn duplocale(base: ::locale_t) -> ::locale_t; - pub fn freelocale(loc: ::locale_t); - pub fn localeconv_l(loc: ::locale_t) -> *mut lconv; - pub fn newlocale(mask: ::c_int, - locale: *const ::c_char, - base: ::locale_t) -> ::locale_t; -} - -mod other; -pub use self::other::*; diff -Nru cargo-0.17.0/vendor/libc-0.2.18/src/unix/bsd/netbsdlike/netbsd/other/b32/mod.rs 
cargo-0.19.0/vendor/libc-0.2.18/src/unix/bsd/netbsdlike/netbsd/other/b32/mod.rs --- cargo-0.17.0/vendor/libc-0.2.18/src/unix/bsd/netbsdlike/netbsd/other/b32/mod.rs 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/libc-0.2.18/src/unix/bsd/netbsdlike/netbsd/other/b32/mod.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,2 +0,0 @@ -pub type c_long = i32; -pub type c_ulong = u32; diff -Nru cargo-0.17.0/vendor/libc-0.2.18/src/unix/bsd/netbsdlike/netbsd/other/b64/mod.rs cargo-0.19.0/vendor/libc-0.2.18/src/unix/bsd/netbsdlike/netbsd/other/b64/mod.rs --- cargo-0.17.0/vendor/libc-0.2.18/src/unix/bsd/netbsdlike/netbsd/other/b64/mod.rs 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/libc-0.2.18/src/unix/bsd/netbsdlike/netbsd/other/b64/mod.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,2 +0,0 @@ -pub type c_long = i64; -pub type c_ulong = u64; diff -Nru cargo-0.17.0/vendor/libc-0.2.18/src/unix/bsd/netbsdlike/netbsd/other/mod.rs cargo-0.19.0/vendor/libc-0.2.18/src/unix/bsd/netbsdlike/netbsd/other/mod.rs --- cargo-0.17.0/vendor/libc-0.2.18/src/unix/bsd/netbsdlike/netbsd/other/mod.rs 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/libc-0.2.18/src/unix/bsd/netbsdlike/netbsd/other/mod.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,13 +0,0 @@ -cfg_if! { - if #[cfg(target_arch = "x86_64")] { - mod b64; - pub use self::b64::*; - } else if #[cfg(any(target_arch = "arm", - target_arch = "powerpc", - target_arch = "x86"))] { - mod b32; - pub use self::b32::*; - } else { - // Unknown target_arch - } -} diff -Nru cargo-0.17.0/vendor/libc-0.2.18/src/unix/bsd/netbsdlike/openbsdlike/bitrig.rs cargo-0.19.0/vendor/libc-0.2.18/src/unix/bsd/netbsdlike/openbsdlike/bitrig.rs --- cargo-0.17.0/vendor/libc-0.2.18/src/unix/bsd/netbsdlike/openbsdlike/bitrig.rs 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/libc-0.2.18/src/unix/bsd/netbsdlike/openbsdlike/bitrig.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,75 +0,0 @@ -s! 
{ - pub struct lconv { - pub decimal_point: *mut ::c_char, - pub thousands_sep: *mut ::c_char, - pub grouping: *mut ::c_char, - pub int_curr_symbol: *mut ::c_char, - pub currency_symbol: *mut ::c_char, - pub mon_decimal_point: *mut ::c_char, - pub mon_thousands_sep: *mut ::c_char, - pub mon_grouping: *mut ::c_char, - pub positive_sign: *mut ::c_char, - pub negative_sign: *mut ::c_char, - pub int_frac_digits: ::c_char, - pub frac_digits: ::c_char, - pub p_cs_precedes: ::c_char, - pub p_sep_by_space: ::c_char, - pub n_cs_precedes: ::c_char, - pub n_sep_by_space: ::c_char, - pub p_sign_posn: ::c_char, - pub n_sign_posn: ::c_char, - pub int_p_cs_precedes: ::c_char, - pub int_n_cs_precedes: ::c_char, - pub int_p_sep_by_space: ::c_char, - pub int_n_sep_by_space: ::c_char, - pub int_p_sign_posn: ::c_char, - pub int_n_sign_posn: ::c_char, - } -} - -pub const LC_COLLATE_MASK: ::c_int = (1 << 0); -pub const LC_CTYPE_MASK: ::c_int = (1 << 1); -pub const LC_MESSAGES_MASK: ::c_int = (1 << 2); -pub const LC_MONETARY_MASK: ::c_int = (1 << 3); -pub const LC_NUMERIC_MASK: ::c_int = (1 << 4); -pub const LC_TIME_MASK: ::c_int = (1 << 5); -pub const LC_ALL_MASK: ::c_int = LC_COLLATE_MASK - | LC_CTYPE_MASK - | LC_MESSAGES_MASK - | LC_MONETARY_MASK - | LC_NUMERIC_MASK - | LC_TIME_MASK; - -pub const ERA: ::nl_item = 52; -pub const ERA_D_FMT: ::nl_item = 53; -pub const ERA_D_T_FMT: ::nl_item = 54; -pub const ERA_T_FMT: ::nl_item = 55; -pub const ALT_DIGITS: ::nl_item = 56; - -pub const D_MD_ORDER: ::nl_item = 57; - -pub const ALTMON_1: ::nl_item = 58; -pub const ALTMON_2: ::nl_item = 59; -pub const ALTMON_3: ::nl_item = 60; -pub const ALTMON_4: ::nl_item = 61; -pub const ALTMON_5: ::nl_item = 62; -pub const ALTMON_6: ::nl_item = 63; -pub const ALTMON_7: ::nl_item = 64; -pub const ALTMON_8: ::nl_item = 65; -pub const ALTMON_9: ::nl_item = 66; -pub const ALTMON_10: ::nl_item = 67; -pub const ALTMON_11: ::nl_item = 68; -pub const ALTMON_12: ::nl_item = 69; - -pub const KERN_RND: ::c_int = 
31; - -extern { - pub fn nl_langinfo_l(item: ::nl_item, locale: ::locale_t) -> *mut ::c_char; - pub fn duplocale(base: ::locale_t) -> ::locale_t; - pub fn freelocale(loc: ::locale_t) -> ::c_int; - pub fn newlocale(mask: ::c_int, - locale: *const ::c_char, - base: ::locale_t) -> ::locale_t; - pub fn uselocale(loc: ::locale_t) -> ::locale_t; - pub fn querylocale(mask: ::c_int, loc: ::locale_t) -> *const ::c_char; -} diff -Nru cargo-0.17.0/vendor/libc-0.2.18/src/unix/bsd/netbsdlike/openbsdlike/mod.rs cargo-0.19.0/vendor/libc-0.2.18/src/unix/bsd/netbsdlike/openbsdlike/mod.rs --- cargo-0.17.0/vendor/libc-0.2.18/src/unix/bsd/netbsdlike/openbsdlike/mod.rs 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/libc-0.2.18/src/unix/bsd/netbsdlike/openbsdlike/mod.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,451 +0,0 @@ -pub type clock_t = i64; -pub type suseconds_t = i64; -pub type dev_t = i32; -pub type sigset_t = ::c_uint; -pub type blksize_t = ::int32_t; -pub type fsblkcnt_t = ::uint64_t; -pub type fsfilcnt_t = ::uint64_t; -pub type pthread_attr_t = *mut ::c_void; -pub type pthread_mutex_t = *mut ::c_void; -pub type pthread_mutexattr_t = *mut ::c_void; -pub type pthread_cond_t = *mut ::c_void; -pub type pthread_condattr_t = *mut ::c_void; -pub type pthread_rwlock_t = *mut ::c_void; - -s! 
{ - pub struct dirent { - pub d_fileno: ::ino_t, - pub d_off: ::off_t, - pub d_reclen: u16, - pub d_type: u8, - pub d_namlen: u8, - __d_padding: [u8; 4], - pub d_name: [::c_char; 256], - } - - pub struct glob_t { - pub gl_pathc: ::c_int, - pub gl_matchc: ::c_int, - pub gl_offs: ::c_int, - pub gl_flags: ::c_int, - pub gl_pathv: *mut *mut ::c_char, - __unused1: *mut ::c_void, - __unused2: *mut ::c_void, - __unused3: *mut ::c_void, - __unused4: *mut ::c_void, - __unused5: *mut ::c_void, - __unused6: *mut ::c_void, - __unused7: *mut ::c_void, - } - - pub struct kevent { - pub ident: ::uintptr_t, - pub filter: ::c_short, - pub flags: ::c_ushort, - pub fflags: ::c_uint, - pub data: ::int64_t, - pub udata: *mut ::c_void, - } - - pub struct stat { - pub st_mode: ::mode_t, - pub st_dev: ::dev_t, - pub st_ino: ::ino_t, - pub st_nlink: ::nlink_t, - pub st_uid: ::uid_t, - pub st_gid: ::gid_t, - pub st_rdev: ::dev_t, - pub st_atime: ::time_t, - pub st_atime_nsec: ::c_long, - pub st_mtime: ::time_t, - pub st_mtime_nsec: ::c_long, - pub st_ctime: ::time_t, - pub st_ctime_nsec: ::c_long, - pub st_size: ::off_t, - pub st_blocks: ::blkcnt_t, - pub st_blksize: ::blksize_t, - pub st_flags: ::uint32_t, - pub st_gen: ::uint32_t, - pub st_birthtime: ::time_t, - pub st_birthtime_nsec: ::c_long, - } - - pub struct statvfs { - pub f_bsize: ::c_ulong, - pub f_frsize: ::c_ulong, - pub f_blocks: ::fsblkcnt_t, - pub f_bfree: ::fsblkcnt_t, - pub f_bavail: ::fsblkcnt_t, - pub f_files: ::fsfilcnt_t, - pub f_ffree: ::fsfilcnt_t, - pub f_favail: ::fsfilcnt_t, - pub f_fsid: ::c_ulong, - pub f_flag: ::c_ulong, - pub f_namemax: ::c_ulong, - } - - pub struct addrinfo { - pub ai_flags: ::c_int, - pub ai_family: ::c_int, - pub ai_socktype: ::c_int, - pub ai_protocol: ::c_int, - pub ai_addrlen: ::socklen_t, - pub ai_addr: *mut ::sockaddr, - pub ai_canonname: *mut ::c_char, - pub ai_next: *mut ::addrinfo, - } - - pub struct sockaddr_storage { - pub ss_len: u8, - pub ss_family: ::sa_family_t, - __ss_pad1: 
[u8; 6], - __ss_pad2: i64, - __ss_pad3: [u8; 240], - } - - pub struct siginfo_t { - pub si_signo: ::c_int, - pub si_code: ::c_int, - pub si_errno: ::c_int, - pub si_addr: *mut ::c_char, - __pad: [u8; 108], - } - - pub struct Dl_info { - pub dli_fname: *const ::c_char, - pub dli_fbase: *mut ::c_void, - pub dli_sname: *const ::c_char, - pub dli_saddr: *mut ::c_void, - } - - pub struct lastlog { - ll_time: ::time_t, - ll_line: [::c_char; UT_LINESIZE], - ll_host: [::c_char; UT_HOSTSIZE], - } - - pub struct utmp { - pub ut_line: [::c_char; UT_LINESIZE], - pub ut_name: [::c_char; UT_NAMESIZE], - pub ut_host: [::c_char; UT_HOSTSIZE], - pub ut_time: ::time_t, - } -} - -pub const UT_NAMESIZE: usize = 32; -pub const UT_LINESIZE: usize = 8; -pub const UT_HOSTSIZE: usize = 256; - -pub const O_CLOEXEC: ::c_int = 0x10000; - -pub const MS_SYNC : ::c_int = 0x0002; -pub const MS_INVALIDATE : ::c_int = 0x0004; - -pub const PTHREAD_STACK_MIN : ::size_t = 2048; - -pub const ENOATTR : ::c_int = 83; -pub const EILSEQ : ::c_int = 84; -pub const EOVERFLOW : ::c_int = 87; -pub const ECANCELED : ::c_int = 88; -pub const EIDRM : ::c_int = 89; -pub const ENOMSG : ::c_int = 90; -pub const ENOTSUP : ::c_int = 91; -pub const ELAST : ::c_int = 91; - -pub const F_DUPFD_CLOEXEC : ::c_int = 10; - -pub const RLIM_NLIMITS: ::c_int = 9; - -pub const SO_SNDTIMEO: ::c_int = 0x1005; -pub const SO_RCVTIMEO: ::c_int = 0x1006; - -pub const IPV6_JOIN_GROUP: ::c_int = 12; -pub const IPV6_LEAVE_GROUP: ::c_int = 13; - -pub const O_DSYNC : ::c_int = 128; - -pub const MAP_RENAME : ::c_int = 0x0000; -pub const MAP_NORESERVE : ::c_int = 0x0000; -pub const MAP_HASSEMAPHORE : ::c_int = 0x0000; - -pub const EIPSEC : ::c_int = 82; -pub const ENOMEDIUM : ::c_int = 85; -pub const EMEDIUMTYPE : ::c_int = 86; - -pub const EAI_SYSTEM: ::c_int = -11; - -pub const RUSAGE_THREAD: ::c_int = 1; - -pub const MAP_COPY : ::c_int = 0x0002; -pub const MAP_NOEXTEND : ::c_int = 0x0000; - -pub const _SC_CLK_TCK : ::c_int = 3; -pub const 
_SC_IOV_MAX : ::c_int = 51; -pub const _SC_GETGR_R_SIZE_MAX : ::c_int = 100; -pub const _SC_GETPW_R_SIZE_MAX : ::c_int = 101; -pub const _SC_LOGIN_NAME_MAX : ::c_int = 102; -pub const _SC_MQ_PRIO_MAX : ::c_int = 59; -pub const _SC_NPROCESSORS_ONLN : ::c_int = 503; -pub const _SC_THREADS : ::c_int = 91; -pub const _SC_THREAD_ATTR_STACKADDR : ::c_int = 77; -pub const _SC_THREAD_ATTR_STACKSIZE : ::c_int = 78; -pub const _SC_THREAD_DESTRUCTOR_ITERATIONS : ::c_int = 80; -pub const _SC_THREAD_KEYS_MAX : ::c_int = 81; -pub const _SC_THREAD_PRIO_INHERIT : ::c_int = 82; -pub const _SC_THREAD_PRIO_PROTECT : ::c_int = 83; -pub const _SC_THREAD_PRIORITY_SCHEDULING : ::c_int = 84; -pub const _SC_THREAD_PROCESS_SHARED : ::c_int = 85; -pub const _SC_THREAD_SAFE_FUNCTIONS : ::c_int = 103; -pub const _SC_THREAD_STACK_MIN : ::c_int = 89; -pub const _SC_THREAD_THREADS_MAX : ::c_int = 90; -pub const _SC_TTY_NAME_MAX : ::c_int = 107; -pub const _SC_ATEXIT_MAX : ::c_int = 46; -pub const _SC_AIO_LISTIO_MAX : ::c_int = 42; -pub const _SC_AIO_MAX : ::c_int = 43; -pub const _SC_ASYNCHRONOUS_IO : ::c_int = 45; -pub const _SC_MAPPED_FILES : ::c_int = 53; -pub const _SC_MEMLOCK : ::c_int = 54; -pub const _SC_MEMLOCK_RANGE : ::c_int = 55; -pub const _SC_MEMORY_PROTECTION : ::c_int = 56; -pub const _SC_MESSAGE_PASSING : ::c_int = 57; -pub const _SC_MQ_OPEN_MAX : ::c_int = 58; -pub const _SC_PRIORITY_SCHEDULING : ::c_int = 61; -pub const _SC_SEMAPHORES : ::c_int = 67; -pub const _SC_SHARED_MEMORY_OBJECTS : ::c_int = 68; -pub const _SC_SYNCHRONIZED_IO : ::c_int = 75; -pub const _SC_TIMERS : ::c_int = 94; -pub const _SC_XOPEN_CRYPT : ::c_int = 117; -pub const _SC_XOPEN_ENH_I18N : ::c_int = 118; -pub const _SC_XOPEN_LEGACY : ::c_int = 119; -pub const _SC_XOPEN_REALTIME : ::c_int = 120; -pub const _SC_XOPEN_REALTIME_THREADS : ::c_int = 121; -pub const _SC_XOPEN_UNIX : ::c_int = 123; -pub const _SC_XOPEN_VERSION : ::c_int = 125; -pub const _SC_SEM_NSEMS_MAX : ::c_int = 31; -pub const _SC_SEM_VALUE_MAX 
: ::c_int = 32; -pub const _SC_AIO_PRIO_DELTA_MAX : ::c_int = 44; -pub const _SC_DELAYTIMER_MAX : ::c_int = 50; -pub const _SC_PRIORITIZED_IO : ::c_int = 60; -pub const _SC_REALTIME_SIGNALS : ::c_int = 64; -pub const _SC_RTSIG_MAX : ::c_int = 66; -pub const _SC_SIGQUEUE_MAX : ::c_int = 70; -pub const _SC_TIMER_MAX : ::c_int = 93; -pub const _SC_HOST_NAME_MAX : ::c_int = 33; - -pub const FD_SETSIZE: usize = 1024; - -pub const ST_NOSUID: ::c_ulong = 2; - -pub const PTHREAD_MUTEX_INITIALIZER: pthread_mutex_t = 0 as *mut _; -pub const PTHREAD_COND_INITIALIZER: pthread_cond_t = 0 as *mut _; -pub const PTHREAD_RWLOCK_INITIALIZER: pthread_rwlock_t = 0 as *mut _; - -pub const PTHREAD_MUTEX_ERRORCHECK: ::c_int = 1; -pub const PTHREAD_MUTEX_RECURSIVE: ::c_int = 2; -pub const PTHREAD_MUTEX_NORMAL: ::c_int = 3; -pub const PTHREAD_MUTEX_STRICT_NP: ::c_int = 4; -pub const PTHREAD_MUTEX_DEFAULT: ::c_int = PTHREAD_MUTEX_STRICT_NP; - -pub const EVFILT_AIO: ::int16_t = -3; -pub const EVFILT_PROC: ::int16_t = -5; -pub const EVFILT_READ: ::int16_t = -1; -pub const EVFILT_SIGNAL: ::int16_t = -6; -pub const EVFILT_TIMER: ::int16_t = -7; -pub const EVFILT_VNODE: ::int16_t = -4; -pub const EVFILT_WRITE: ::int16_t = -2; - -pub const EV_ADD: ::uint16_t = 0x1; -pub const EV_DELETE: ::uint16_t = 0x2; -pub const EV_ENABLE: ::uint16_t = 0x4; -pub const EV_DISABLE: ::uint16_t = 0x8; -pub const EV_ONESHOT: ::uint16_t = 0x10; -pub const EV_CLEAR: ::uint16_t = 0x20; -pub const EV_FLAG1: ::uint16_t = 0x2000; -pub const EV_ERROR: ::uint16_t = 0x4000; -pub const EV_EOF: ::uint16_t = 0x8000; -pub const EV_SYSFLAGS: ::uint16_t = 0xf000; - -pub const NOTE_LOWAT: ::uint32_t = 0x00000001; -pub const NOTE_EOF: ::uint32_t = 0x00000002; -pub const NOTE_DELETE: ::uint32_t = 0x00000001; -pub const NOTE_WRITE: ::uint32_t = 0x00000002; -pub const NOTE_EXTEND: ::uint32_t = 0x00000004; -pub const NOTE_ATTRIB: ::uint32_t = 0x00000008; -pub const NOTE_LINK: ::uint32_t = 0x00000010; -pub const NOTE_RENAME: ::uint32_t 
= 0x00000020; -pub const NOTE_REVOKE: ::uint32_t = 0x00000040; -pub const NOTE_TRUNCATE: ::uint32_t = 0x00000080; -pub const NOTE_EXIT: ::uint32_t = 0x80000000; -pub const NOTE_FORK: ::uint32_t = 0x40000000; -pub const NOTE_EXEC: ::uint32_t = 0x20000000; -pub const NOTE_PDATAMASK: ::uint32_t = 0x000fffff; -pub const NOTE_PCTRLMASK: ::uint32_t = 0xf0000000; -pub const NOTE_TRACK: ::uint32_t = 0x00000001; -pub const NOTE_TRACKERR: ::uint32_t = 0x00000002; -pub const NOTE_CHILD: ::uint32_t = 0x00000004; - -pub const TMP_MAX : ::c_uint = 0x7fffffff; - -pub const NI_MAXHOST: ::size_t = 256; - -pub const RTLD_LOCAL: ::c_int = 0; -pub const CTL_MAXNAME: ::c_int = 12; -pub const CTLTYPE_NODE: ::c_int = 1; -pub const CTLTYPE_INT: ::c_int = 2; -pub const CTLTYPE_STRING: ::c_int = 3; -pub const CTLTYPE_QUAD: ::c_int = 4; -pub const CTLTYPE_STRUCT: ::c_int = 5; -pub const CTL_UNSPEC: ::c_int = 0; -pub const CTL_KERN: ::c_int = 1; -pub const CTL_VM: ::c_int = 2; -pub const CTL_FS: ::c_int = 3; -pub const CTL_NET: ::c_int = 4; -pub const CTL_DEBUG: ::c_int = 5; -pub const CTL_HW: ::c_int = 6; -pub const CTL_MACHDEP: ::c_int = 7; -pub const CTL_DDB: ::c_int = 9; -pub const CTL_VFS: ::c_int = 10; -pub const CTL_MAXID: ::c_int = 11; -pub const KERN_OSTYPE: ::c_int = 1; -pub const KERN_OSRELEASE: ::c_int = 2; -pub const KERN_OSREV: ::c_int = 3; -pub const KERN_VERSION: ::c_int = 4; -pub const KERN_MAXVNODES: ::c_int = 5; -pub const KERN_MAXPROC: ::c_int = 6; -pub const KERN_MAXFILES: ::c_int = 7; -pub const KERN_ARGMAX: ::c_int = 8; -pub const KERN_SECURELVL: ::c_int = 9; -pub const KERN_HOSTNAME: ::c_int = 10; -pub const KERN_HOSTID: ::c_int = 11; -pub const KERN_CLOCKRATE: ::c_int = 12; -pub const KERN_PROF: ::c_int = 16; -pub const KERN_POSIX1: ::c_int = 17; -pub const KERN_NGROUPS: ::c_int = 18; -pub const KERN_JOB_CONTROL: ::c_int = 19; -pub const KERN_SAVED_IDS: ::c_int = 20; -pub const KERN_BOOTTIME: ::c_int = 21; -pub const KERN_DOMAINNAME: ::c_int = 22; -pub const 
KERN_MAXPARTITIONS: ::c_int = 23; -pub const KERN_RAWPARTITION: ::c_int = 24; -pub const KERN_MAXTHREAD: ::c_int = 25; -pub const KERN_NTHREADS: ::c_int = 26; -pub const KERN_OSVERSION: ::c_int = 27; -pub const KERN_SOMAXCONN: ::c_int = 28; -pub const KERN_SOMINCONN: ::c_int = 29; -pub const KERN_USERMOUNT: ::c_int = 30; -pub const KERN_NOSUIDCOREDUMP: ::c_int = 32; -pub const KERN_FSYNC: ::c_int = 33; -pub const KERN_SYSVMSG: ::c_int = 34; -pub const KERN_SYSVSEM: ::c_int = 35; -pub const KERN_SYSVSHM: ::c_int = 36; -pub const KERN_ARND: ::c_int = 37; -pub const KERN_MSGBUFSIZE: ::c_int = 38; -pub const KERN_MALLOCSTATS: ::c_int = 39; -pub const KERN_CPTIME: ::c_int = 40; -pub const KERN_NCHSTATS: ::c_int = 41; -pub const KERN_FORKSTAT: ::c_int = 42; -pub const KERN_NSELCOLL: ::c_int = 43; -pub const KERN_TTY: ::c_int = 44; -pub const KERN_CCPU: ::c_int = 45; -pub const KERN_FSCALE: ::c_int = 46; -pub const KERN_NPROCS: ::c_int = 47; -pub const KERN_MSGBUF: ::c_int = 48; -pub const KERN_POOL: ::c_int = 49; -pub const KERN_STACKGAPRANDOM: ::c_int = 50; -pub const KERN_SYSVIPC_INFO: ::c_int = 51; -pub const KERN_SPLASSERT: ::c_int = 54; -pub const KERN_PROC_ARGS: ::c_int = 55; -pub const KERN_NFILES: ::c_int = 56; -pub const KERN_TTYCOUNT: ::c_int = 57; -pub const KERN_NUMVNODES: ::c_int = 58; -pub const KERN_MBSTAT: ::c_int = 59; -pub const KERN_SEMINFO: ::c_int = 61; -pub const KERN_SHMINFO: ::c_int = 62; -pub const KERN_INTRCNT: ::c_int = 63; -pub const KERN_WATCHDOG: ::c_int = 64; -pub const KERN_PROC: ::c_int = 66; -pub const KERN_MAXCLUSTERS: ::c_int = 67; -pub const KERN_EVCOUNT: ::c_int = 68; -pub const KERN_TIMECOUNTER: ::c_int = 69; -pub const KERN_MAXLOCKSPERUID: ::c_int = 70; -pub const KERN_CPTIME2: ::c_int = 71; -pub const KERN_CACHEPCT: ::c_int = 72; -pub const KERN_FILE: ::c_int = 73; -pub const KERN_CONSDEV: ::c_int = 75; -pub const KERN_NETLIVELOCKS: ::c_int = 76; -pub const KERN_POOL_DEBUG: ::c_int = 77; -pub const KERN_PROC_CWD: ::c_int = 78; 
-pub const KERN_PROC_NOBROADCASTKILL: ::c_int = 79; -pub const KERN_PROC_VMMAP: ::c_int = 80; -pub const KERN_GLOBAL_PTRACE: ::c_int = 81; -pub const KERN_CONSBUFSIZE: ::c_int = 82; -pub const KERN_CONSBUF: ::c_int = 83; -pub const KERN_MAXID: ::c_int = 84; -pub const KERN_PROC_ALL: ::c_int = 0; -pub const KERN_PROC_PID: ::c_int = 1; -pub const KERN_PROC_PGRP: ::c_int = 2; -pub const KERN_PROC_SESSION: ::c_int = 3; -pub const KERN_PROC_TTY: ::c_int = 4; -pub const KERN_PROC_UID: ::c_int = 5; -pub const KERN_PROC_RUID: ::c_int = 6; -pub const KERN_PROC_KTHREAD: ::c_int = 7; -pub const KERN_PROC_SHOW_THREADS: ::c_int = 0x40000000; -pub const KERN_SYSVIPC_MSG_INFO: ::c_int = 1; -pub const KERN_SYSVIPC_SEM_INFO: ::c_int = 2; -pub const KERN_SYSVIPC_SHM_INFO: ::c_int = 3; -pub const KERN_PROC_ARGV: ::c_int = 1; -pub const KERN_PROC_NARGV: ::c_int = 2; -pub const KERN_PROC_ENV: ::c_int = 3; -pub const KERN_PROC_NENV: ::c_int = 4; -pub const KI_NGROUPS: ::c_int = 16; -pub const KI_MAXCOMLEN: ::c_int = 24; -pub const KI_WMESGLEN: ::c_int = 8; -pub const KI_MAXLOGNAME: ::c_int = 32; -pub const KI_EMULNAMELEN: ::c_int = 8; - -extern { - pub fn getnameinfo(sa: *const ::sockaddr, - salen: ::socklen_t, - host: *mut ::c_char, - hostlen: ::size_t, - serv: *mut ::c_char, - servlen: ::size_t, - flags: ::c_int) -> ::c_int; - pub fn kevent(kq: ::c_int, - changelist: *const ::kevent, - nchanges: ::c_int, - eventlist: *mut ::kevent, - nevents: ::c_int, - timeout: *const ::timespec) -> ::c_int; - pub fn mprotect(addr: *mut ::c_void, len: ::size_t, prot: ::c_int) - -> ::c_int; - pub fn pthread_main_np() -> ::c_int; - pub fn pthread_set_name_np(tid: ::pthread_t, name: *const ::c_char); - pub fn pthread_stackseg_np(thread: ::pthread_t, - sinfo: *mut ::stack_t) -> ::c_int; - pub fn sysctl(name: *const ::c_int, - namelen: ::c_uint, - oldp: *mut ::c_void, - oldlenp: *mut ::size_t, - newp: *mut ::c_void, - newlen: ::size_t) - -> ::c_int; - pub fn getentropy(buf: *mut ::c_void, buflen: 
::size_t) -> ::c_int; - pub fn pledge(promises: *const ::c_char, - paths: *mut *const ::c_char) -> ::c_int; -} - -cfg_if! { - if #[cfg(target_os = "openbsd")] { - mod openbsd; - pub use self::openbsd::*; - } else if #[cfg(target_os = "bitrig")] { - mod bitrig; - pub use self::bitrig::*; - } else { - // Unknown target_os - } -} - -mod other; -pub use self::other::*; diff -Nru cargo-0.17.0/vendor/libc-0.2.18/src/unix/bsd/netbsdlike/openbsdlike/openbsd.rs cargo-0.19.0/vendor/libc-0.2.18/src/unix/bsd/netbsdlike/openbsdlike/openbsd.rs --- cargo-0.17.0/vendor/libc-0.2.18/src/unix/bsd/netbsdlike/openbsdlike/openbsd.rs 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/libc-0.2.18/src/unix/bsd/netbsdlike/openbsdlike/openbsd.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,28 +0,0 @@ -s! { - pub struct lconv { - pub decimal_point: *mut ::c_char, - pub thousands_sep: *mut ::c_char, - pub grouping: *mut ::c_char, - pub int_curr_symbol: *mut ::c_char, - pub currency_symbol: *mut ::c_char, - pub mon_decimal_point: *mut ::c_char, - pub mon_thousands_sep: *mut ::c_char, - pub mon_grouping: *mut ::c_char, - pub positive_sign: *mut ::c_char, - pub negative_sign: *mut ::c_char, - pub int_frac_digits: ::c_char, - pub frac_digits: ::c_char, - pub p_cs_precedes: ::c_char, - pub p_sep_by_space: ::c_char, - pub n_cs_precedes: ::c_char, - pub n_sep_by_space: ::c_char, - pub p_sign_posn: ::c_char, - pub n_sign_posn: ::c_char, - pub int_p_cs_precedes: ::c_char, - pub int_p_sep_by_space: ::c_char, - pub int_n_cs_precedes: ::c_char, - pub int_n_sep_by_space: ::c_char, - pub int_p_sign_posn: ::c_char, - pub int_n_sign_posn: ::c_char, - } -} diff -Nru cargo-0.17.0/vendor/libc-0.2.18/src/unix/bsd/netbsdlike/openbsdlike/other/b32/mod.rs cargo-0.19.0/vendor/libc-0.2.18/src/unix/bsd/netbsdlike/openbsdlike/other/b32/mod.rs --- cargo-0.17.0/vendor/libc-0.2.18/src/unix/bsd/netbsdlike/openbsdlike/other/b32/mod.rs 2017-03-24 16:59:43.000000000 +0000 +++ 
cargo-0.19.0/vendor/libc-0.2.18/src/unix/bsd/netbsdlike/openbsdlike/other/b32/mod.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,2 +0,0 @@ -pub type c_long = i32; -pub type c_ulong = u32; diff -Nru cargo-0.17.0/vendor/libc-0.2.18/src/unix/bsd/netbsdlike/openbsdlike/other/b64/mod.rs cargo-0.19.0/vendor/libc-0.2.18/src/unix/bsd/netbsdlike/openbsdlike/other/b64/mod.rs --- cargo-0.17.0/vendor/libc-0.2.18/src/unix/bsd/netbsdlike/openbsdlike/other/b64/mod.rs 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/libc-0.2.18/src/unix/bsd/netbsdlike/openbsdlike/other/b64/mod.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,2 +0,0 @@ -pub type c_long = i64; -pub type c_ulong = u64; diff -Nru cargo-0.17.0/vendor/libc-0.2.18/src/unix/bsd/netbsdlike/openbsdlike/other/mod.rs cargo-0.19.0/vendor/libc-0.2.18/src/unix/bsd/netbsdlike/openbsdlike/other/mod.rs --- cargo-0.17.0/vendor/libc-0.2.18/src/unix/bsd/netbsdlike/openbsdlike/other/mod.rs 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/libc-0.2.18/src/unix/bsd/netbsdlike/openbsdlike/other/mod.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,11 +0,0 @@ -cfg_if! 
{ - if #[cfg(target_arch = "x86_64")] { - mod b64; - pub use self::b64::*; - } else if #[cfg(target_arch = "x86")] { - mod b32; - pub use self::b32::*; - } else { - // Unknown target_arch - } -} diff -Nru cargo-0.17.0/vendor/libc-0.2.18/src/unix/haiku/b32.rs cargo-0.19.0/vendor/libc-0.2.18/src/unix/haiku/b32.rs --- cargo-0.17.0/vendor/libc-0.2.18/src/unix/haiku/b32.rs 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/libc-0.2.18/src/unix/haiku/b32.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,2 +0,0 @@ -pub type c_long = i32; -pub type c_ulong = u32; diff -Nru cargo-0.17.0/vendor/libc-0.2.18/src/unix/haiku/b64.rs cargo-0.19.0/vendor/libc-0.2.18/src/unix/haiku/b64.rs --- cargo-0.17.0/vendor/libc-0.2.18/src/unix/haiku/b64.rs 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/libc-0.2.18/src/unix/haiku/b64.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,2 +0,0 @@ -pub type c_ulong = u64; -pub type c_long = i64; diff -Nru cargo-0.17.0/vendor/libc-0.2.18/src/unix/haiku/mod.rs cargo-0.19.0/vendor/libc-0.2.18/src/unix/haiku/mod.rs --- cargo-0.17.0/vendor/libc-0.2.18/src/unix/haiku/mod.rs 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/libc-0.2.18/src/unix/haiku/mod.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,748 +0,0 @@ -use dox::mem; - -pub type rlim_t = ::uintptr_t; -pub type sa_family_t = u8; -pub type pthread_key_t = ::c_int; -pub type nfds_t = ::c_long; -pub type tcflag_t = ::c_uint; -pub type speed_t = ::c_uint; -pub type c_char = i8; -pub type clock_t = i32; -pub type clockid_t = i32; -pub type time_t = i32; -pub type suseconds_t = i32; -pub type wchar_t = i32; -pub type off_t = i64; -pub type ino_t = i64; -pub type blkcnt_t = i64; -pub type blksize_t = i32; -pub type dev_t = i32; -pub type mode_t = u32; -pub type nlink_t = i32; -pub type useconds_t = u32; -pub type socklen_t = u32; -pub type pthread_t = ::uintptr_t; -pub type pthread_mutexattr_t = ::uintptr_t; -pub type sigset_t = u64; -pub type fsblkcnt_t = i64; -pub type fsfilcnt_t = 
i64; -pub type pthread_attr_t = *mut ::c_void; -pub type nl_item = ::c_int; - -pub enum timezone {} - -s! { - pub struct sockaddr { - pub sa_len: u8, - pub sa_family: sa_family_t, - pub sa_data: [::c_char; 30], - } - - pub struct sockaddr_in { - pub sin_len: u8, - pub sin_family: sa_family_t, - pub sin_port: ::in_port_t, - pub sin_addr: ::in_addr, - pub sin_zero: [u8; 24], - } - - pub struct sockaddr_in6 { - pub sin6_len: u8, - pub sin6_family: sa_family_t, - pub sin6_port: ::in_port_t, - pub sin6_flowinfo: u32, - pub sin6_addr: ::in6_addr, - pub sin6_scope_id: u32, - } - - pub struct sockaddr_un { - pub sun_len: u8, - pub sun_family: sa_family_t, - pub sun_path: [::c_char; 126] - } - - pub struct sockaddr_storage { - pub ss_len: u8, - pub ss_family: sa_family_t, - __ss_pad1: [u8; 6], - __ss_pad2: u64, - __ss_pad3: [u8; 112], - } - - pub struct addrinfo { - pub ai_flags: ::c_int, - pub ai_family: ::c_int, - pub ai_socktype: ::c_int, - pub ai_protocol: ::c_int, - pub ai_addrlen: socklen_t, - pub ai_canonname: *mut c_char, - pub ai_addr: *mut ::sockaddr, - pub ai_next: *mut addrinfo, - } - - pub struct fd_set { - fds_bits: [c_ulong; FD_SETSIZE / ULONG_SIZE], - } - - pub struct tm { - pub tm_sec: ::c_int, - pub tm_min: ::c_int, - pub tm_hour: ::c_int, - pub tm_mday: ::c_int, - pub tm_mon: ::c_int, - pub tm_year: ::c_int, - pub tm_wday: ::c_int, - pub tm_yday: ::c_int, - pub tm_isdst: ::c_int, - pub tm_gmtoff: ::c_long, - pub tm_zone: *const ::c_char, - } - - pub struct utsname { - pub sysname: [::c_char; 32], - pub nodename: [::c_char; 32], - pub release: [::c_char; 32], - pub version: [::c_char; 32], - pub machine: [::c_char; 32], - } - - pub struct lconv { - pub decimal_point: *mut ::c_char, - pub thousands_sep: *mut ::c_char, - pub grouping: *mut ::c_char, - pub int_curr_symbol: *mut ::c_char, - pub currency_symbol: *mut ::c_char, - pub mon_decimal_point: *mut ::c_char, - pub mon_thousands_sep: *mut ::c_char, - pub mon_grouping: *mut ::c_char, - pub positive_sign: 
*mut ::c_char, - pub negative_sign: *mut ::c_char, - pub int_frac_digits: ::c_char, - pub frac_digits: ::c_char, - pub p_cs_precedes: ::c_char, - pub p_sep_by_space: ::c_char, - pub n_cs_precedes: ::c_char, - pub n_sep_by_space: ::c_char, - pub p_sign_posn: ::c_char, - pub n_sign_posn: ::c_char, - pub int_p_cs_precedes: ::c_char, - pub int_p_sep_by_space: ::c_char, - pub int_n_cs_precedes: ::c_char, - pub int_n_sep_by_space: ::c_char, - pub int_p_sign_posn: ::c_char, - pub int_n_sign_posn: ::c_char, - } - - pub struct msghdr { - pub msg_name: *mut ::c_void, - pub msg_namelen: ::socklen_t, - pub msg_iov: *mut ::iovec, - pub msg_iovlen: ::c_int, - pub msg_control: *mut ::c_void, - pub msg_controllen: ::socklen_t, - pub msg_flags: ::c_int, - } - - pub struct Dl_info { - pub dli_fname: *const ::c_char, - pub dli_fbase: *mut ::c_void, - pub dli_sname: *const ::c_char, - pub dli_saddr: *mut ::c_void, - } - - pub struct termios { - pub c_iflag: ::tcflag_t, - pub c_oflag: ::tcflag_t, - pub c_cflag: ::tcflag_t, - pub c_lflag: ::tcflag_t, - pub c_line: ::c_char, - pub c_ispeed: ::speed_t, - pub c_ospeed: ::speed_t, - pub c_cc: [::cc_t; ::NCCS], - } - - pub struct stat { - pub st_dev: dev_t, - pub st_ino: ino_t, - pub st_mode: mode_t, - pub st_nlink: nlink_t, - pub st_uid: ::uid_t, - pub st_gid: ::gid_t, - pub st_size: off_t, - pub st_rdev: dev_t, - pub st_blksize: blksize_t, - pub st_atime: time_t, - pub st_atime_nsec: c_long, - pub st_mtime: time_t, - pub st_mtime_nsec: c_long, - pub st_ctime: time_t, - pub st_ctime_nsec: c_long, - pub st_crtime: time_t, - pub st_crtime_nsec: c_long, - pub st_type: u32, - pub st_blocks: blkcnt_t, - } - - pub struct dirent { - pub d_dev: dev_t, - pub d_pdev: dev_t, - pub d_ino: ino_t, - pub d_pino: i64, - pub d_reclen: ::c_ushort, - pub d_name: [::c_char; 1024], // Max length is _POSIX_PATH_MAX - } - - pub struct glob_t { - pub gl_pathc: ::size_t, - __unused1: ::size_t, - pub gl_offs: ::size_t, - __unused2: ::size_t, - pub gl_pathv: *mut 
*mut c_char, - - __unused3: *mut ::c_void, - __unused4: *mut ::c_void, - __unused5: *mut ::c_void, - __unused6: *mut ::c_void, - __unused7: *mut ::c_void, - __unused8: *mut ::c_void, - } - - pub struct pthread_mutex_t { - flags: u32, - lock: i32, - unused: i32, - owner: i32, - owner_count: i32, - } - - pub struct pthread_cond_t { - flags: u32, - unused: i32, - mutex: *mut ::c_void, - waiter_count: i32, - lock: i32, - } - - pub struct pthread_rwlock_t { - flags: u32, - owner: i32, - lock_sem: i32, // this is actually a union - lock_count: i32, - reader_count: i32, - writer_count: i32, - waiters: [*mut ::c_void; 2], - } - - pub struct passwd { - pub pw_name: *mut ::c_char, - pub pw_passwd: *mut ::c_char, - pub pw_uid: ::uid_t, - pub pw_gid: ::gid_t, - pub pw_dir: *mut ::c_char, - pub pw_shell: *mut ::c_char, - pub pw_gecos: *mut ::c_char, - } - - pub struct statvfs { - pub f_bsize: ::c_ulong, - pub f_frsize: ::c_ulong, - pub f_blocks: ::fsblkcnt_t, - pub f_bfree: ::fsblkcnt_t, - pub f_bavail: ::fsblkcnt_t, - pub f_files: ::fsfilcnt_t, - pub f_ffree: ::fsfilcnt_t, - pub f_favail: ::fsfilcnt_t, - pub f_fsid: ::c_ulong, - pub f_flag: ::c_ulong, - pub f_namemax: ::c_ulong, - } - - pub struct stack_t { - pub ss_sp: *mut ::c_void, - pub ss_size: ::size_t, - pub ss_flags: ::c_int, - } - - pub struct siginfo_t { - pub si_signo: ::c_int, - pub si_code: ::c_int, - pub si_errno: ::c_int, - pub si_pid: ::pid_t, - pub si_uid: ::uid_t, - pub si_addr: *mut ::c_void, - pub si_status: ::c_int, - pub si_band: c_long, - pub sigval: *mut ::c_void, - } - - pub struct sigaction { - pub sa_sigaction: ::sighandler_t, - pub sa_mask: ::sigset_t, - pub sa_flags: ::c_int, - sa_userdata: *mut ::c_void, - } - - pub struct sem_t { - pub se_type: i32, - pub se_named_id: i32, // this is actually a union - pub se_unnamed: i32, - pub se_padding: [i32; 4], - } - - pub struct pthread_condattr_t { - pub process_shared: bool, - pub clock_id: i32, - } -} - -// intentionally not public, only used for fd_set 
-cfg_if! { - if #[cfg(target_pointer_width = "32")] { - const ULONG_SIZE: usize = 32; - } else if #[cfg(target_pointer_width = "64")] { - const ULONG_SIZE: usize = 64; - } else { - // Unknown target_pointer_width - } -} - -pub const EXIT_FAILURE: ::c_int = 1; -pub const EXIT_SUCCESS: ::c_int = 0; -pub const RAND_MAX: ::c_int = 2147483647; -pub const EOF: ::c_int = -1; -pub const SEEK_SET: ::c_int = 0; -pub const SEEK_CUR: ::c_int = 1; -pub const SEEK_END: ::c_int = 2; -pub const _IOFBF: ::c_int = 0; -pub const _IONBF: ::c_int = 2; -pub const _IOLBF: ::c_int = 1; - -pub const F_DUPFD: ::c_int = 0x0001; -pub const F_GETFD: ::c_int = 0x0002; -pub const F_SETFD: ::c_int = 0x0004; -pub const F_GETFL: ::c_int = 0x0008; -pub const F_SETFL: ::c_int = 0x0010; - -pub const SIGTRAP: ::c_int = 22; - -pub const PTHREAD_CREATE_JOINABLE: ::c_int = 0; -pub const PTHREAD_CREATE_DETACHED: ::c_int = 1; - -pub const CLOCK_REALTIME: ::c_int = -1; -pub const CLOCK_MONOTONIC: ::c_int = 0; - -pub const RLIMIT_CORE: ::c_int = 0; -pub const RLIMIT_CPU: ::c_int = 1; -pub const RLIMIT_DATA: ::c_int = 2; -pub const RLIMIT_FSIZE: ::c_int = 3; -pub const RLIMIT_NOFILE: ::c_int = 4; -pub const RLIMIT_AS: ::c_int = 6; -// Haiku specific -pub const RLIMIT_NOVMON: ::c_int = 7; -pub const RLIMIT_NLIMITS: ::c_int = 8; - -pub const RUSAGE_SELF: ::c_int = 0; - -pub const NCCS: usize = 11; - -pub const O_RDONLY: ::c_int = 0x0000; -pub const O_WRONLY: ::c_int = 0x0001; -pub const O_RDWR: ::c_int = 0x0002; -pub const O_ACCMODE: ::c_int = 0x0003; - -pub const O_EXCL: ::c_int = 0x0100; -pub const O_CREAT: ::c_int = 0x0200; -pub const O_TRUNC: ::c_int = 0x0400; -pub const O_NOCTTY: ::c_int = 0x1000; -pub const O_NOTRAVERSE: ::c_int = 0x2000; - -pub const O_CLOEXEC: ::c_int = 0x00000040; -pub const O_NONBLOCK: ::c_int = 0x00000080; -pub const O_APPEND: ::c_int = 0x00000800; -pub const O_SYNC: ::c_int = 0x00010000; -pub const O_RSYNC: ::c_int = 0x00020000; -pub const O_DSYNC: ::c_int = 0x00040000; -pub const 
O_NOFOLLOW: ::c_int = 0x00080000; -pub const O_NOCACHE: ::c_int = 0x00100000; -pub const O_DIRECTORY: ::c_int = 0x00200000; - -pub const S_IFIFO: ::mode_t = 61440; -pub const S_IFCHR: ::mode_t = 49152; -pub const S_IFBLK: ::mode_t = 24576; -pub const S_IFDIR: ::mode_t = 16384; -pub const S_IFREG: ::mode_t = 32768; -pub const S_IFLNK: ::mode_t = 40960; -pub const S_IFSOCK: ::mode_t = 49152; -pub const S_IFMT: ::mode_t = 61440; -pub const S_IRWXU: ::mode_t = 448; -pub const S_IXUSR: ::mode_t = 64; -pub const S_IWUSR: ::mode_t = 128; -pub const S_IRUSR: ::mode_t = 256; -pub const S_IRWXG: ::mode_t = 70; -pub const S_IXGRP: ::mode_t = 10; -pub const S_IWGRP: ::mode_t = 20; -pub const S_IRGRP: ::mode_t = 40; -pub const S_IRWXO: ::mode_t = 7; -pub const S_IXOTH: ::mode_t = 1; -pub const S_IWOTH: ::mode_t = 2; -pub const S_IROTH: ::mode_t = 4; -pub const F_OK: ::c_int = 0; -pub const R_OK: ::c_int = 4; -pub const W_OK: ::c_int = 2; -pub const X_OK: ::c_int = 1; -pub const STDIN_FILENO: ::c_int = 0; -pub const STDOUT_FILENO: ::c_int = 1; -pub const STDERR_FILENO: ::c_int = 2; -pub const SIGHUP: ::c_int = 1; -pub const SIGINT: ::c_int = 2; -pub const SIGQUIT: ::c_int = 3; -pub const SIGILL: ::c_int = 4; -pub const SIGABRT: ::c_int = 6; -pub const SIGFPE: ::c_int = 8; -pub const SIGKILL: ::c_int = 9; -pub const SIGSEGV: ::c_int = 11; -pub const SIGPIPE: ::c_int = 7; -pub const SIGALRM: ::c_int = 14; -pub const SIGTERM: ::c_int = 15; - -pub const EAI_SYSTEM: ::c_int = 11; - -pub const PROT_NONE: ::c_int = 0; -pub const PROT_READ: ::c_int = 1; -pub const PROT_WRITE: ::c_int = 2; -pub const PROT_EXEC: ::c_int = 4; - -pub const LC_ALL: ::c_int = 0; -pub const LC_COLLATE: ::c_int = 1; -pub const LC_CTYPE: ::c_int = 2; -pub const LC_MONETARY: ::c_int = 3; -pub const LC_NUMERIC: ::c_int = 4; -pub const LC_TIME: ::c_int = 5; -pub const LC_MESSAGES: ::c_int = 6; - -// TODO: Haiku does not have MAP_FILE, but libstd/os.rs requires it -pub const MAP_FILE: ::c_int = 0x00; -pub const 
MAP_SHARED: ::c_int = 0x01; -pub const MAP_PRIVATE: ::c_int = 0x02; -pub const MAP_FIXED: ::c_int = 0x004; - -pub const MAP_FAILED: *mut ::c_void = !0 as *mut ::c_void; - -pub const MS_ASYNC: ::c_int = 0x01; -pub const MS_INVALIDATE: ::c_int = 0x04; -pub const MS_SYNC: ::c_int = 0x02; - -pub const EPERM : ::c_int = -2147483633; -pub const ENOENT : ::c_int = -2147459069; -pub const ESRCH : ::c_int = -2147454963; -pub const EINTR : ::c_int = -2147483638; -pub const EIO : ::c_int = -2147483647; -pub const ENXIO : ::c_int = -2147454965; -pub const E2BIG : ::c_int = -2147454975; -pub const ENOEXEC : ::c_int = -2147478782; -pub const EBADF : ::c_int = -2147459072; -pub const ECHILD : ::c_int = -2147454974; -pub const EDEADLK : ::c_int = -2147454973; -pub const ENOMEM : ::c_int = -2147454976; -pub const EACCES : ::c_int = -2147483646; -pub const EFAULT : ::c_int = -2147478783; -// pub const ENOTBLK : ::c_int = 15; -pub const EBUSY : ::c_int = -2147483634; -pub const EEXIST : ::c_int = -2147459070; -pub const EXDEV : ::c_int = -2147459061; -pub const ENODEV : ::c_int = -2147454969; -pub const ENOTDIR : ::c_int = -2147459067; -pub const EISDIR : ::c_int = -2147459063; -pub const EINVAL : ::c_int = -2147483643; -pub const ENFILE : ::c_int = -2147454970; -pub const EMFILE : ::c_int = -2147459062; -pub const ENOTTY : ::c_int = -2147454966; -pub const ETXTBSY : ::c_int = -2147454917; -pub const EFBIG : ::c_int = -2147454972; -pub const ENOSPC : ::c_int = -2147459065; -pub const ESPIPE : ::c_int = -2147454964; -pub const EROFS : ::c_int = -2147459064; -pub const EMLINK : ::c_int = -2147454971; -pub const EPIPE : ::c_int = -2147459059; -pub const EDOM : ::c_int = -2147454960; -pub const ERANGE : ::c_int = -2147454959; -pub const EAGAIN : ::c_int = -2147483637; -pub const EWOULDBLOCK : ::c_int = -2147483637; - -pub const EINPROGRESS : ::c_int = -2147454940; -pub const EALREADY : ::c_int = -2147454939; -pub const ENOTSOCK : ::c_int = -2147454932; -pub const EDESTADDRREQ : ::c_int = 
-2147454928; -pub const EMSGSIZE : ::c_int = -2147454934; -pub const EPROTOTYPE : ::c_int = -2147454958; -pub const ENOPROTOOPT : ::c_int = -2147454942; -pub const EPROTONOSUPPORT : ::c_int = -2147454957; -pub const EOPNOTSUPP : ::c_int = -2147454933; -pub const EPFNOSUPPORT : ::c_int = -2147454956; -pub const EAFNOSUPPORT : ::c_int = -2147454955; -pub const EADDRINUSE : ::c_int = -2147454954; -pub const EADDRNOTAVAIL : ::c_int = -2147454953; -pub const ENETDOWN : ::c_int = -2147454953; -pub const ENETUNREACH : ::c_int = -2147454951; -pub const ENETRESET : ::c_int = -2147454950; -pub const ECONNABORTED : ::c_int = -2147454949; -pub const ECONNRESET : ::c_int = -2147454948; -pub const ENOBUFS : ::c_int = -2147454941; -pub const EISCONN : ::c_int = -2147454947; -pub const ENOTCONN : ::c_int = -2147454946; -pub const ESHUTDOWN : ::c_int = -2147454945; -pub const ETIMEDOUT : ::c_int = -2147483639; -pub const ECONNREFUSED : ::c_int = -2147454944; -pub const ELOOP : ::c_int = -2147459060; -pub const ENAMETOOLONG : ::c_int = -2147459068; -pub const EHOSTDOWN : ::c_int = -2147454931; -pub const EHOSTUNREACH : ::c_int = -2147454943; -pub const ENOTEMPTY : ::c_int = -2147459066; -pub const EDQUOT : ::c_int = -2147454927; -pub const ESTALE : ::c_int = -2147454936; -pub const ENOLCK : ::c_int = -2147454968; -pub const ENOSYS : ::c_int = -2147454967; -pub const EIDRM : ::c_int = -2147454926; -pub const ENOMSG : ::c_int = -2147454937; -pub const EOVERFLOW : ::c_int = -2147454935; -pub const ECANCELED : ::c_int = -2147454929; -pub const EILSEQ : ::c_int = -2147454938; -pub const ENOATTR : ::c_int = -2147454916; -pub const EBADMSG : ::c_int = -2147454930; -pub const EMULTIHOP : ::c_int = -2147454925; -pub const ENOLINK : ::c_int = -2147454923; -pub const EPROTO : ::c_int = -2147454919; - -pub const IPPROTO_RAW: ::c_int = 255; - -// These are prefixed with POSIX_ on Haiku -pub const MADV_NORMAL: ::c_int = 1; -pub const MADV_SEQUENTIAL: ::c_int = 2; -pub const MADV_RANDOM: ::c_int = 
3; -pub const MADV_WILLNEED: ::c_int = 4; -pub const MADV_DONTNEED: ::c_int = 5; - -pub const IFF_LOOPBACK: ::c_int = 0x0008; - -pub const AF_UNIX: ::c_int = 9; -pub const AF_INET: ::c_int = 1; -pub const AF_INET6: ::c_int = 6; -pub const SOCK_RAW: ::c_int = 3; -pub const IPPROTO_TCP: ::c_int = 6; -pub const IPPROTO_IP: ::c_int = 0; -pub const IPPROTO_IPV6: ::c_int = 41; -pub const IP_MULTICAST_TTL: ::c_int = 10; -pub const IP_MULTICAST_LOOP: ::c_int = 11; -pub const IP_TTL: ::c_int = 4; -pub const IP_HDRINCL: ::c_int = 2; -pub const IP_ADD_MEMBERSHIP: ::c_int = 12; -pub const IP_DROP_MEMBERSHIP: ::c_int = 13; - -pub const TCP_NODELAY: ::c_int = 0x01; -pub const TCP_MAXSEG: ::c_int = 0x02; -pub const TCP_NOPUSH: ::c_int = 0x04; -pub const TCP_NOOPT: ::c_int = 0x08; - -pub const IPV6_MULTICAST_LOOP: ::c_int = 26; -pub const IPV6_JOIN_GROUP: ::c_int = 28; -pub const IPV6_LEAVE_GROUP: ::c_int = 29; -pub const IPV6_V6ONLY: ::c_int = 30; - -pub const SO_DEBUG: ::c_int = 0x00000004; - -pub const MSG_NOSIGNAL: ::c_int = 0x0800; - -pub const SHUT_RD: ::c_int = 0; -pub const SHUT_WR: ::c_int = 1; -pub const SHUT_RDWR: ::c_int = 2; - -pub const LOCK_SH: ::c_int = 0x01; -pub const LOCK_EX: ::c_int = 0x02; -pub const LOCK_NB: ::c_int = 0x04; -pub const LOCK_UN: ::c_int = 0x08; - -pub const SIGSTKSZ: ::size_t = 16384; - -pub const SA_NODEFER: ::c_int = 0x08; -pub const SA_RESETHAND: ::c_int = 0x04; -pub const SA_RESTART: ::c_int = 0x10; -pub const SA_NOCLDSTOP: ::c_int = 0x01; - -pub const FD_SETSIZE: usize = 1024; - -pub const RTLD_NOW: ::c_int = 0x1; -pub const RTLD_DEFAULT: *mut ::c_void = 0isize as *mut ::c_void; - -pub const BUFSIZ: ::c_uint = 8192; -pub const FILENAME_MAX: ::c_uint = 256; -pub const FOPEN_MAX: ::c_uint = 128; -pub const L_tmpnam: ::c_uint = 512; -pub const TMP_MAX: ::c_uint = 32768; -pub const _PC_NAME_MAX: ::c_int = 4; - -pub const FIONBIO: ::c_int = 0xbe000000; - -pub const _SC_IOV_MAX : ::c_int = 32; -pub const _SC_GETGR_R_SIZE_MAX : ::c_int = 25; -pub 
const _SC_GETPW_R_SIZE_MAX : ::c_int = 26; -pub const _SC_PAGESIZE : ::c_int = 27; -pub const _SC_THREAD_ATTR_STACKADDR : ::c_int = 48; -pub const _SC_THREAD_ATTR_STACKSIZE : ::c_int = 49; -pub const _SC_THREAD_PRIORITY_SCHEDULING : ::c_int = 50; -pub const _SC_THREAD_PROCESS_SHARED : ::c_int = 46; -pub const _SC_THREAD_STACK_MIN : ::c_int = 47; -pub const _SC_THREADS : ::c_int = 31; -pub const _SC_ATEXIT_MAX : ::c_int = 37; - -pub const PTHREAD_STACK_MIN: ::size_t = 8192; - -pub const PTHREAD_MUTEX_INITIALIZER: pthread_mutex_t = pthread_mutex_t { - flags: 0, - lock: 0, - unused: -42, - owner: -1, - owner_count: 0, -}; -pub const PTHREAD_COND_INITIALIZER: pthread_cond_t = pthread_cond_t { - flags: 0, - unused: -42, - mutex: 0 as *mut _, - waiter_count: 0, - lock: 0, -}; -pub const PTHREAD_RWLOCK_INITIALIZER: pthread_rwlock_t = pthread_rwlock_t { - flags: 0, - owner: 0, - lock_sem: 0, - lock_count: 0, - reader_count: 0, - writer_count: 0, - waiters: [0 as *mut _; 2], -}; - -pub const PTHREAD_MUTEX_DEFAULT: ::c_int = 0; -pub const PTHREAD_MUTEX_NORMAL: ::c_int = 1; -pub const PTHREAD_MUTEX_ERRORCHECK: ::c_int = 2; -pub const PTHREAD_MUTEX_RECURSIVE: ::c_int = 3; - -pub const FIOCLEX: c_ulong = 0; // TODO: does not exist on Haiku! 
- -pub const SA_ONSTACK: c_ulong = 0x20; -pub const SA_SIGINFO: c_ulong = 0x40; -pub const SA_NOCLDWAIT: c_ulong = 0x02; - -pub const SIGCHLD: ::c_int = 5; -pub const SIGBUS: ::c_int = 30; -pub const SIG_SETMASK: ::c_int = 3; - -pub const RUSAGE_CHILDREN: ::c_int = -1; - -pub const SOCK_STREAM: ::c_int = 1; -pub const SOCK_DGRAM: ::c_int = 2; - -pub const SOL_SOCKET: ::c_int = -1; -pub const SO_ACCEPTCONN: ::c_int = 0x00000001; -pub const SO_BROADCAST: ::c_int = 0x00000002; -pub const SO_DONTROUTE: ::c_int = 0x00000008; -pub const SO_KEEPALIVE: ::c_int = 0x00000010; -pub const SO_OOBINLINE: ::c_int = 0x00000020; -pub const SO_REUSEADDR: ::c_int = 0x00000040; -pub const SO_REUSEPORT: ::c_int = 0x00000080; -pub const SO_USELOOPBACK: ::c_int = 0x00000100; -pub const SO_LINGER: ::c_int = 0x00000200; -pub const SO_SNDBUF: ::c_int = 0x40000001; -pub const SO_SNDLOWAT: ::c_int = 0x40000002; -pub const SO_SNDTIMEO: ::c_int = 0x40000003; -pub const SO_RCVBUF: ::c_int = 0x40000004; -pub const SO_RCVLOWAT: ::c_int = 0x40000005; -pub const SO_RCVTIMEO: ::c_int = 0x40000006; -pub const SO_ERROR: ::c_int = 0x40000007; -pub const SO_TYPE: ::c_int = 0x40000008; -pub const SO_NONBLOCK: ::c_int = 0x40000009; -pub const SO_BINDTODEVICE: ::c_int = 0x4000000a; -pub const SO_PEERCRED: ::c_int = 0x4000000b; - -pub const NI_MAXHOST: ::size_t = 1025; - -f! 
{ - pub fn FD_CLR(fd: ::c_int, set: *mut fd_set) -> () { - let fd = fd as usize; - let size = mem::size_of_val(&(*set).fds_bits[0]) * 8; - (*set).fds_bits[fd / size] &= !(1 << (fd % size)); - return - } - - pub fn FD_ISSET(fd: ::c_int, set: *mut fd_set) -> bool { - let fd = fd as usize; - let size = mem::size_of_val(&(*set).fds_bits[0]) * 8; - return ((*set).fds_bits[fd / size] & (1 << (fd % size))) != 0 - } - - pub fn FD_SET(fd: ::c_int, set: *mut fd_set) -> () { - let fd = fd as usize; - let size = mem::size_of_val(&(*set).fds_bits[0]) * 8; - (*set).fds_bits[fd / size] |= 1 << (fd % size); - return - } - - pub fn FD_ZERO(set: *mut fd_set) -> () { - for slot in (*set).fds_bits.iter_mut() { - *slot = 0; - } - } - - pub fn WIFEXITED(status: ::c_int) -> bool { - (status >> 8) == 0 - } - - pub fn WEXITSTATUS(status: ::c_int) -> ::c_int { - (status & 0xff) - } - - pub fn WTERMSIG(status: ::c_int) -> ::c_int { - (status >> 8) & 0xff - } -} - -extern { - pub fn clock_gettime(clk_id: ::c_int, tp: *mut ::timespec) -> ::c_int; - pub fn pthread_attr_getguardsize(attr: *const ::pthread_attr_t, - guardsize: *mut ::size_t) -> ::c_int; - pub fn pthread_attr_getstack(attr: *const ::pthread_attr_t, - stackaddr: *mut *mut ::c_void, - stacksize: *mut ::size_t) -> ::c_int; - pub fn pthread_condattr_getclock(attr: *const pthread_condattr_t, - clock_id: *mut clockid_t) -> ::c_int; - pub fn pthread_condattr_setclock(attr: *mut pthread_condattr_t, - clock_id: clockid_t) -> ::c_int; - pub fn memalign(align: ::size_t, size: ::size_t) -> *mut ::c_void; - pub fn setgroups(ngroups: ::size_t, - ptr: *const ::gid_t) -> ::c_int; - pub fn getpwuid_r(uid: ::uid_t, - pwd: *mut passwd, - buffer: *mut ::c_char, - bufferSize: ::size_t, - result: *mut *mut passwd) -> ::c_int; - pub fn ioctl(fd: ::c_int, request: ::c_int, ...) 
-> ::c_int; - pub fn mprotect(addr: *const ::c_void, len: ::size_t, prot: ::c_int) - -> ::c_int; - pub fn getnameinfo(sa: *const ::sockaddr, - salen: ::socklen_t, - host: *mut ::c_char, - hostlen: ::size_t, - serv: *mut ::c_char, - sevlen: ::size_t, - flags: ::c_int) -> ::c_int; - pub fn pthread_mutex_timedlock(lock: *mut pthread_mutex_t, - abstime: *const ::timespec) -> ::c_int; -} - -cfg_if! { - if #[cfg(target_pointer_width = "64")] { - mod b64; - pub use self::b64::*; - } else { - mod b32; - pub use self::b32::*; - } -} diff -Nru cargo-0.17.0/vendor/libc-0.2.18/src/unix/mod.rs cargo-0.19.0/vendor/libc-0.2.18/src/unix/mod.rs --- cargo-0.17.0/vendor/libc-0.2.18/src/unix/mod.rs 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/libc-0.2.18/src/unix/mod.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,879 +0,0 @@ -//! Definitions found commonly among almost all Unix derivatives -//! -//! More functions and definitions can be found in the more specific modules -//! according to the platform in question. - -use dox::Option; - -pub type pid_t = i32; -pub type uid_t = u32; -pub type gid_t = u32; -pub type in_addr_t = u32; -pub type in_port_t = u16; -pub type sighandler_t = ::size_t; -pub type cc_t = ::c_uchar; - -pub enum DIR {} -pub enum locale_t {} - -s! 
{ - pub struct group { - pub gr_name: *mut ::c_char, - pub gr_passwd: *mut ::c_char, - pub gr_gid: ::gid_t, - pub gr_mem: *mut *mut ::c_char, - } - - pub struct utimbuf { - pub actime: time_t, - pub modtime: time_t, - } - - pub struct timeval { - pub tv_sec: time_t, - pub tv_usec: suseconds_t, - } - - pub struct timespec { - pub tv_sec: time_t, - pub tv_nsec: c_long, - } - - pub struct rlimit { - pub rlim_cur: rlim_t, - pub rlim_max: rlim_t, - } - - pub struct rusage { - pub ru_utime: timeval, - pub ru_stime: timeval, - pub ru_maxrss: c_long, - pub ru_ixrss: c_long, - pub ru_idrss: c_long, - pub ru_isrss: c_long, - pub ru_minflt: c_long, - pub ru_majflt: c_long, - pub ru_nswap: c_long, - pub ru_inblock: c_long, - pub ru_oublock: c_long, - pub ru_msgsnd: c_long, - pub ru_msgrcv: c_long, - pub ru_nsignals: c_long, - pub ru_nvcsw: c_long, - pub ru_nivcsw: c_long, - - #[cfg(any(target_env = "musl"))] - __reserved: [c_long; 16], - } - - #[cfg_attr(target_os = "netbsd", repr(packed))] - pub struct in_addr { - pub s_addr: in_addr_t, - } - - pub struct in6_addr { - pub s6_addr: [u8; 16], - __align: [u32; 0], - } - - pub struct ip_mreq { - pub imr_multiaddr: in_addr, - pub imr_interface: in_addr, - } - - pub struct ipv6_mreq { - pub ipv6mr_multiaddr: in6_addr, - #[cfg(target_os = "android")] - pub ipv6mr_interface: ::c_int, - #[cfg(not(target_os = "android"))] - pub ipv6mr_interface: ::c_uint, - } - - pub struct hostent { - pub h_name: *mut ::c_char, - pub h_aliases: *mut *mut ::c_char, - pub h_addrtype: ::c_int, - pub h_length: ::c_int, - pub h_addr_list: *mut *mut ::c_char, - } - - pub struct iovec { - pub iov_base: *mut ::c_void, - pub iov_len: ::size_t, - } - - pub struct pollfd { - pub fd: ::c_int, - pub events: ::c_short, - pub revents: ::c_short, - } - - pub struct winsize { - pub ws_row: ::c_ushort, - pub ws_col: ::c_ushort, - pub ws_xpixel: ::c_ushort, - pub ws_ypixel: ::c_ushort, - } - - pub struct linger { - pub l_onoff: ::c_int, - pub l_linger: ::c_int, - } - - 
pub struct sigval { - // Actually a union of an int and a void* - pub sival_ptr: *mut ::c_void - } -} - -pub const SIG_DFL: sighandler_t = 0 as sighandler_t; -pub const SIG_IGN: sighandler_t = 1 as sighandler_t; -pub const SIG_ERR: sighandler_t = !0 as sighandler_t; - -pub const DT_FIFO: u8 = 1; -pub const DT_CHR: u8 = 2; -pub const DT_DIR: u8 = 4; -pub const DT_BLK: u8 = 6; -pub const DT_REG: u8 = 8; -pub const DT_LNK: u8 = 10; -pub const DT_SOCK: u8 = 12; - -pub const FD_CLOEXEC: ::c_int = 0x1; - -pub const USRQUOTA: ::c_int = 0; -pub const GRPQUOTA: ::c_int = 1; - -pub const SIGIOT: ::c_int = 6; - -pub const S_ISUID: ::c_int = 0x800; -pub const S_ISGID: ::c_int = 0x400; -pub const S_ISVTX: ::c_int = 0x200; - -pub const POLLIN: ::c_short = 0x1; -pub const POLLPRI: ::c_short = 0x2; -pub const POLLOUT: ::c_short = 0x4; -pub const POLLERR: ::c_short = 0x8; -pub const POLLHUP: ::c_short = 0x10; -pub const POLLNVAL: ::c_short = 0x20; - -pub const IF_NAMESIZE: ::size_t = 16; - -pub const RTLD_LAZY: ::c_int = 0x1; - -pub const LOG_EMERG: ::c_int = 0; -pub const LOG_ALERT: ::c_int = 1; -pub const LOG_CRIT: ::c_int = 2; -pub const LOG_ERR: ::c_int = 3; -pub const LOG_WARNING: ::c_int = 4; -pub const LOG_NOTICE: ::c_int = 5; -pub const LOG_INFO: ::c_int = 6; -pub const LOG_DEBUG: ::c_int = 7; - -pub const LOG_KERN: ::c_int = 0; -pub const LOG_USER: ::c_int = 1 << 3; -pub const LOG_MAIL: ::c_int = 2 << 3; -pub const LOG_DAEMON: ::c_int = 3 << 3; -pub const LOG_AUTH: ::c_int = 4 << 3; -pub const LOG_SYSLOG: ::c_int = 5 << 3; -pub const LOG_LPR: ::c_int = 6 << 3; -pub const LOG_NEWS: ::c_int = 7 << 3; -pub const LOG_UUCP: ::c_int = 8 << 3; -pub const LOG_LOCAL0: ::c_int = 16 << 3; -pub const LOG_LOCAL1: ::c_int = 17 << 3; -pub const LOG_LOCAL2: ::c_int = 18 << 3; -pub const LOG_LOCAL3: ::c_int = 19 << 3; -pub const LOG_LOCAL4: ::c_int = 20 << 3; -pub const LOG_LOCAL5: ::c_int = 21 << 3; -pub const LOG_LOCAL6: ::c_int = 22 << 3; -pub const LOG_LOCAL7: ::c_int = 23 << 3; - -pub 
const LOG_PID: ::c_int = 0x01; -pub const LOG_CONS: ::c_int = 0x02; -pub const LOG_ODELAY: ::c_int = 0x04; -pub const LOG_NDELAY: ::c_int = 0x08; -pub const LOG_NOWAIT: ::c_int = 0x10; - -pub const LOG_PRIMASK: ::c_int = 7; -pub const LOG_FACMASK: ::c_int = 0x3f8; - -pub const PRIO_PROCESS: ::c_int = 0; -pub const PRIO_PGRP: ::c_int = 1; -pub const PRIO_USER: ::c_int = 2; - -pub const PRIO_MIN: ::c_int = -20; -pub const PRIO_MAX: ::c_int = 20; - -cfg_if! { - if #[cfg(dox)] { - // on dox builds don't pull in anything - } else if #[cfg(all(not(stdbuild), feature = "use_std"))] { - // cargo build, don't pull in anything extra as the libstd dep - // already pulls in all libs. - } else if #[cfg(any(all(target_env = "musl", not(target_arch = "mips"))))] { - #[link(name = "c", kind = "static", cfg(target_feature = "crt-static"))] - #[link(name = "c", cfg(not(target_feature = "crt-static")))] - extern {} - } else if #[cfg(target_os = "emscripten")] { - #[link(name = "c")] - extern {} - } else if #[cfg(all(target_os = "netbsd", target_vendor = "rumprun"))] { - // Since we don't use -nodefaultlibs on Rumprun, libc is always pulled - // in automatically by the linker. We avoid passing it explicitly, as it - // causes some versions of binutils to crash with an assertion failure. 
- #[link(name = "m")] - extern {} - } else if #[cfg(any(target_os = "macos", - target_os = "ios", - target_os = "android", - target_os = "openbsd", - target_os = "bitrig"))] { - #[link(name = "c")] - #[link(name = "m")] - extern {} - } else if #[cfg(target_os = "haiku")] { - #[link(name = "root")] - #[link(name = "network")] - extern {} - } else if #[cfg(target_os = "fuchsia")] { - #[link(name = "c")] - #[link(name = "mxio")] - extern {} - } else { - #[link(name = "c")] - #[link(name = "m")] - #[link(name = "rt")] - extern {} - } -} - -extern { - pub fn getgrnam(name: *const ::c_char) -> *mut group; - pub fn getgrgid(gid: ::gid_t) -> *mut group; - - pub fn endpwent(); - #[cfg_attr(target_os = "netbsd", link_name = "__getpwnam50")] - pub fn getpwnam(name: *const ::c_char) -> *mut passwd; - #[cfg_attr(target_os = "netbsd", link_name = "__getpwuid50")] - pub fn getpwuid(uid: ::uid_t) -> *mut passwd; - - pub fn fprintf(stream: *mut ::FILE, - format: *const ::c_char, ...) -> ::c_int; - pub fn printf(format: *const ::c_char, ...) -> ::c_int; - pub fn snprintf(s: *mut ::c_char, n: ::size_t, - format: *const ::c_char, ...) -> ::c_int; - pub fn sprintf(s: *mut ::c_char, format: *const ::c_char, ...) -> ::c_int; - pub fn fscanf(stream: *mut ::FILE, format: *const ::c_char, ...) -> ::c_int; - pub fn scanf(format: *const ::c_char, ...) -> ::c_int; - pub fn sscanf(s: *const ::c_char, format: *const ::c_char, ...) 
-> ::c_int; - pub fn getchar_unlocked() -> ::c_int; - pub fn putchar_unlocked(c: ::c_int) -> ::c_int; - - #[cfg_attr(target_os = "netbsd", link_name = "__socket30")] - pub fn socket(domain: ::c_int, ty: ::c_int, protocol: ::c_int) -> ::c_int; - #[cfg_attr(all(target_os = "macos", target_arch = "x86"), - link_name = "connect$UNIX2003")] - pub fn connect(socket: ::c_int, address: *const sockaddr, - len: socklen_t) -> ::c_int; - #[cfg_attr(all(target_os = "macos", target_arch = "x86"), - link_name = "bind$UNIX2003")] - pub fn bind(socket: ::c_int, address: *const sockaddr, - address_len: socklen_t) -> ::c_int; - #[cfg_attr(all(target_os = "macos", target_arch = "x86"), - link_name = "listen$UNIX2003")] - pub fn listen(socket: ::c_int, backlog: ::c_int) -> ::c_int; - #[cfg_attr(all(target_os = "macos", target_arch = "x86"), - link_name = "accept$UNIX2003")] - pub fn accept(socket: ::c_int, address: *mut sockaddr, - address_len: *mut socklen_t) -> ::c_int; - #[cfg_attr(all(target_os = "macos", target_arch = "x86"), - link_name = "getpeername$UNIX2003")] - pub fn getpeername(socket: ::c_int, address: *mut sockaddr, - address_len: *mut socklen_t) -> ::c_int; - #[cfg_attr(all(target_os = "macos", target_arch = "x86"), - link_name = "getsockname$UNIX2003")] - pub fn getsockname(socket: ::c_int, address: *mut sockaddr, - address_len: *mut socklen_t) -> ::c_int; - pub fn setsockopt(socket: ::c_int, level: ::c_int, name: ::c_int, - value: *const ::c_void, - option_len: socklen_t) -> ::c_int; - #[cfg_attr(all(target_os = "macos", target_arch = "x86"), - link_name = "socketpair$UNIX2003")] - pub fn socketpair(domain: ::c_int, type_: ::c_int, protocol: ::c_int, - socket_vector: *mut ::c_int) -> ::c_int; - #[cfg_attr(all(target_os = "macos", target_arch = "x86"), - link_name = "sendto$UNIX2003")] - pub fn sendto(socket: ::c_int, buf: *const ::c_void, len: ::size_t, - flags: ::c_int, addr: *const sockaddr, - addrlen: socklen_t) -> ::ssize_t; - pub fn shutdown(socket: ::c_int, how: 
::c_int) -> ::c_int; - - #[cfg_attr(all(target_os = "macos", target_arch = "x86"), - link_name = "chmod$UNIX2003")] - pub fn chmod(path: *const c_char, mode: mode_t) -> ::c_int; - #[cfg_attr(all(target_os = "macos", target_arch = "x86"), - link_name = "fchmod$UNIX2003")] - pub fn fchmod(fd: ::c_int, mode: mode_t) -> ::c_int; - - #[cfg_attr(target_os = "macos", link_name = "fstat$INODE64")] - #[cfg_attr(target_os = "netbsd", link_name = "__fstat50")] - pub fn fstat(fildes: ::c_int, buf: *mut stat) -> ::c_int; - - pub fn mkdir(path: *const c_char, mode: mode_t) -> ::c_int; - - #[cfg_attr(target_os = "macos", link_name = "stat$INODE64")] - #[cfg_attr(target_os = "netbsd", link_name = "__stat50")] - pub fn stat(path: *const c_char, buf: *mut stat) -> ::c_int; - - #[cfg_attr(all(target_os = "macos", target_arch = "x86"), - link_name = "popen$UNIX2003")] - pub fn popen(command: *const c_char, - mode: *const c_char) -> *mut ::FILE; - pub fn pclose(stream: *mut ::FILE) -> ::c_int; - #[cfg_attr(all(target_os = "macos", target_arch = "x86"), - link_name = "fdopen$UNIX2003")] - pub fn fdopen(fd: ::c_int, mode: *const c_char) -> *mut ::FILE; - pub fn fileno(stream: *mut ::FILE) -> ::c_int; - - #[cfg_attr(all(target_os = "macos", target_arch = "x86"), - link_name = "open$UNIX2003")] - pub fn open(path: *const c_char, oflag: ::c_int, ...) -> ::c_int; - #[cfg_attr(all(target_os = "macos", target_arch = "x86"), - link_name = "creat$UNIX2003")] - pub fn creat(path: *const c_char, mode: mode_t) -> ::c_int; - #[cfg_attr(all(target_os = "macos", target_arch = "x86"), - link_name = "fcntl$UNIX2003")] - pub fn fcntl(fd: ::c_int, cmd: ::c_int, ...) 
-> ::c_int; - - #[cfg_attr(all(target_os = "macos", target_arch = "x86_64"), - link_name = "opendir$INODE64")] - #[cfg_attr(all(target_os = "macos", target_arch = "x86"), - link_name = "opendir$INODE64$UNIX2003")] - #[cfg_attr(target_os = "netbsd", link_name = "__opendir30")] - pub fn opendir(dirname: *const c_char) -> *mut ::DIR; - #[cfg_attr(target_os = "macos", link_name = "readdir_r$INODE64")] - #[cfg_attr(target_os = "netbsd", link_name = "__readdir_r30")] - pub fn readdir_r(dirp: *mut ::DIR, entry: *mut ::dirent, - result: *mut *mut ::dirent) -> ::c_int; - #[cfg_attr(all(target_os = "macos", target_arch = "x86"), - link_name = "closedir$UNIX2003")] - pub fn closedir(dirp: *mut ::DIR) -> ::c_int; - #[cfg_attr(all(target_os = "macos", target_arch = "x86_64"), - link_name = "rewinddir$INODE64")] - #[cfg_attr(all(target_os = "macos", target_arch = "x86"), - link_name = "rewinddir$INODE64$UNIX2003")] - pub fn rewinddir(dirp: *mut ::DIR); - - pub fn access(path: *const c_char, amode: ::c_int) -> ::c_int; - pub fn alarm(seconds: ::c_uint) -> ::c_uint; - pub fn chdir(dir: *const c_char) -> ::c_int; - pub fn chown(path: *const c_char, uid: uid_t, - gid: gid_t) -> ::c_int; - #[cfg_attr(all(target_os = "macos", target_arch = "x86"), - link_name = "lchown$UNIX2003")] - pub fn lchown(path: *const c_char, uid: uid_t, - gid: gid_t) -> ::c_int; - #[cfg_attr(all(target_os = "macos", target_arch = "x86"), - link_name = "close$UNIX2003")] - pub fn close(fd: ::c_int) -> ::c_int; - pub fn dup(fd: ::c_int) -> ::c_int; - pub fn dup2(src: ::c_int, dst: ::c_int) -> ::c_int; - pub fn execv(prog: *const c_char, - argv: *const *const c_char) -> ::c_int; - pub fn execve(prog: *const c_char, argv: *const *const c_char, - envp: *const *const c_char) - -> ::c_int; - pub fn execvp(c: *const c_char, - argv: *const *const c_char) -> ::c_int; - pub fn fork() -> pid_t; - pub fn fpathconf(filedes: ::c_int, name: ::c_int) -> c_long; - pub fn getcwd(buf: *mut c_char, size: ::size_t) -> *mut c_char; 
- pub fn getegid() -> gid_t; - pub fn geteuid() -> uid_t; - pub fn getgid() -> gid_t; - pub fn getgroups(ngroups_max: ::c_int, groups: *mut gid_t) - -> ::c_int; - pub fn getlogin() -> *mut c_char; - #[cfg_attr(all(target_os = "macos", target_arch = "x86"), - link_name = "getopt$UNIX2003")] - pub fn getopt(argc: ::c_int, argv: *const *mut c_char, - optstr: *const c_char) -> ::c_int; - pub fn getpgid(pid: pid_t) -> pid_t; - pub fn getpgrp() -> pid_t; - pub fn getpid() -> pid_t; - pub fn getppid() -> pid_t; - pub fn getuid() -> uid_t; - pub fn isatty(fd: ::c_int) -> ::c_int; - pub fn link(src: *const c_char, dst: *const c_char) -> ::c_int; - pub fn lseek(fd: ::c_int, offset: off_t, whence: ::c_int) -> off_t; - pub fn pathconf(path: *const c_char, name: ::c_int) -> c_long; - #[cfg_attr(all(target_os = "macos", target_arch = "x86"), - link_name = "pause$UNIX2003")] - pub fn pause() -> ::c_int; - pub fn pipe(fds: *mut ::c_int) -> ::c_int; - pub fn posix_memalign(memptr: *mut *mut ::c_void, - align: ::size_t, - size: ::size_t) -> ::c_int; - #[cfg_attr(all(target_os = "macos", target_arch = "x86"), - link_name = "read$UNIX2003")] - pub fn read(fd: ::c_int, buf: *mut ::c_void, count: ::size_t) - -> ::ssize_t; - pub fn rmdir(path: *const c_char) -> ::c_int; - pub fn setgid(gid: gid_t) -> ::c_int; - pub fn setpgid(pid: pid_t, pgid: pid_t) -> ::c_int; - pub fn setsid() -> pid_t; - pub fn setuid(uid: uid_t) -> ::c_int; - #[cfg_attr(all(target_os = "macos", target_arch = "x86"), - link_name = "sleep$UNIX2003")] - pub fn sleep(secs: ::c_uint) -> ::c_uint; - #[cfg_attr(all(target_os = "macos", target_arch = "x86"), - link_name = "nanosleep$UNIX2003")] - #[cfg_attr(target_os = "netbsd", link_name = "__nanosleep50")] - pub fn nanosleep(rqtp: *const timespec, - rmtp: *mut timespec) -> ::c_int; - pub fn tcgetpgrp(fd: ::c_int) -> pid_t; - pub fn tcsetpgrp(fd: ::c_int, pgrp: ::pid_t) -> ::c_int; - pub fn ttyname(fd: ::c_int) -> *mut c_char; - pub fn unlink(c: *const c_char) -> ::c_int; 
- #[cfg_attr(all(target_os = "macos", target_arch = "x86"), - link_name = "wait$UNIX2003")] - pub fn wait(status: *mut ::c_int) -> pid_t; - #[cfg_attr(all(target_os = "macos", target_arch = "x86"), - link_name = "waitpid$UNIX2003")] - pub fn waitpid(pid: pid_t, status: *mut ::c_int, options: ::c_int) - -> pid_t; - #[cfg_attr(all(target_os = "macos", target_arch = "x86"), - link_name = "write$UNIX2003")] - pub fn write(fd: ::c_int, buf: *const ::c_void, count: ::size_t) - -> ::ssize_t; - #[cfg_attr(all(target_os = "macos", target_arch = "x86"), - link_name = "pread$UNIX2003")] - pub fn pread(fd: ::c_int, buf: *mut ::c_void, count: ::size_t, - offset: off_t) -> ::ssize_t; - #[cfg_attr(all(target_os = "macos", target_arch = "x86"), - link_name = "pwrite$UNIX2003")] - pub fn pwrite(fd: ::c_int, buf: *const ::c_void, count: ::size_t, - offset: off_t) -> ::ssize_t; - pub fn umask(mask: mode_t) -> mode_t; - - #[cfg_attr(target_os = "netbsd", link_name = "__utime50")] - pub fn utime(file: *const c_char, buf: *const utimbuf) -> ::c_int; - - #[cfg_attr(all(target_os = "macos", target_arch = "x86"), - link_name = "kill$UNIX2003")] - pub fn kill(pid: pid_t, sig: ::c_int) -> ::c_int; - - pub fn mlock(addr: *const ::c_void, len: ::size_t) -> ::c_int; - pub fn munlock(addr: *const ::c_void, len: ::size_t) -> ::c_int; - pub fn mlockall(flags: ::c_int) -> ::c_int; - pub fn munlockall() -> ::c_int; - - #[cfg_attr(all(target_os = "macos", target_arch = "x86"), - link_name = "mmap$UNIX2003")] - pub fn mmap(addr: *mut ::c_void, - len: ::size_t, - prot: ::c_int, - flags: ::c_int, - fd: ::c_int, - offset: off_t) - -> *mut ::c_void; - #[cfg_attr(all(target_os = "macos", target_arch = "x86"), - link_name = "munmap$UNIX2003")] - pub fn munmap(addr: *mut ::c_void, len: ::size_t) -> ::c_int; - - pub fn if_nametoindex(ifname: *const c_char) -> ::c_uint; - pub fn if_indextoname(ifindex: ::c_uint, - ifname: *mut ::c_char) -> *mut ::c_char; - - #[cfg_attr(target_os = "macos", link_name = 
"lstat$INODE64")] - #[cfg_attr(target_os = "netbsd", link_name = "__lstat50")] - pub fn lstat(path: *const c_char, buf: *mut stat) -> ::c_int; - - #[cfg_attr(all(target_os = "macos", target_arch = "x86"), - link_name = "fsync$UNIX2003")] - pub fn fsync(fd: ::c_int) -> ::c_int; - - #[cfg_attr(all(target_os = "macos", target_arch = "x86"), - link_name = "setenv$UNIX2003")] - pub fn setenv(name: *const c_char, val: *const c_char, - overwrite: ::c_int) -> ::c_int; - #[cfg_attr(all(target_os = "macos", target_arch = "x86"), - link_name = "unsetenv$UNIX2003")] - #[cfg_attr(target_os = "netbsd", link_name = "__unsetenv13")] - pub fn unsetenv(name: *const c_char) -> ::c_int; - - pub fn symlink(path1: *const c_char, - path2: *const c_char) -> ::c_int; - - pub fn ftruncate(fd: ::c_int, length: off_t) -> ::c_int; - - pub fn signal(signum: ::c_int, handler: sighandler_t) -> sighandler_t; - - #[cfg_attr(all(target_os = "macos", target_arch = "x86"), - link_name = "getrlimit$UNIX2003")] - pub fn getrlimit(resource: ::c_int, rlim: *mut rlimit) -> ::c_int; - #[cfg_attr(all(target_os = "macos", target_arch = "x86"), - link_name = "setrlimit$UNIX2003")] - pub fn setrlimit(resource: ::c_int, rlim: *const rlimit) -> ::c_int; - #[cfg_attr(target_os = "netbsd", link_name = "__getrusage50")] - pub fn getrusage(resource: ::c_int, usage: *mut rusage) -> ::c_int; - - pub fn getdtablesize() -> ::c_int; - #[cfg_attr(any(target_os = "macos", target_os = "ios"), - link_name = "realpath$DARWIN_EXTSN")] - pub fn realpath(pathname: *const ::c_char, resolved: *mut ::c_char) - -> *mut ::c_char; - - pub fn flock(fd: ::c_int, operation: ::c_int) -> ::c_int; - - #[cfg_attr(target_os = "netbsd", link_name = "__gettimeofday50")] - pub fn gettimeofday(tp: *mut ::timeval, - tz: *mut ::c_void) -> ::c_int; - - pub fn pthread_self() -> ::pthread_t; - pub fn pthread_create(native: *mut ::pthread_t, - attr: *const ::pthread_attr_t, - f: extern fn(*mut ::c_void) -> *mut ::c_void, - value: *mut ::c_void) -> 
::c_int; - #[cfg_attr(all(target_os = "macos", target_arch = "x86"), - link_name = "pthread_join$UNIX2003")] - pub fn pthread_join(native: ::pthread_t, - value: *mut *mut ::c_void) -> ::c_int; - pub fn pthread_attr_init(attr: *mut ::pthread_attr_t) -> ::c_int; - pub fn pthread_attr_destroy(attr: *mut ::pthread_attr_t) -> ::c_int; - pub fn pthread_attr_setstacksize(attr: *mut ::pthread_attr_t, - stack_size: ::size_t) -> ::c_int; - pub fn pthread_attr_setdetachstate(attr: *mut ::pthread_attr_t, - state: ::c_int) -> ::c_int; - pub fn pthread_detach(thread: ::pthread_t) -> ::c_int; - #[cfg_attr(target_os = "netbsd", link_name = "__libc_thr_yield")] - pub fn sched_yield() -> ::c_int; - pub fn pthread_key_create(key: *mut pthread_key_t, - dtor: Option) - -> ::c_int; - pub fn pthread_key_delete(key: pthread_key_t) -> ::c_int; - pub fn pthread_getspecific(key: pthread_key_t) -> *mut ::c_void; - pub fn pthread_setspecific(key: pthread_key_t, value: *const ::c_void) - -> ::c_int; - pub fn pthread_mutex_init(lock: *mut pthread_mutex_t, - attr: *const pthread_mutexattr_t) -> ::c_int; - pub fn pthread_mutex_destroy(lock: *mut pthread_mutex_t) -> ::c_int; - pub fn pthread_mutex_lock(lock: *mut pthread_mutex_t) -> ::c_int; - pub fn pthread_mutex_trylock(lock: *mut pthread_mutex_t) -> ::c_int; - pub fn pthread_mutex_unlock(lock: *mut pthread_mutex_t) -> ::c_int; - - pub fn pthread_mutexattr_init(attr: *mut pthread_mutexattr_t) -> ::c_int; - #[cfg_attr(all(target_os = "macos", target_arch = "x86"), - link_name = "pthread_mutexattr_destroy$UNIX2003")] - pub fn pthread_mutexattr_destroy(attr: *mut pthread_mutexattr_t) -> ::c_int; - pub fn pthread_mutexattr_settype(attr: *mut pthread_mutexattr_t, - _type: ::c_int) -> ::c_int; - - #[cfg_attr(all(target_os = "macos", target_arch = "x86"), - link_name = "pthread_cond_init$UNIX2003")] - pub fn pthread_cond_init(cond: *mut pthread_cond_t, - attr: *const pthread_condattr_t) -> ::c_int; - #[cfg_attr(all(target_os = "macos", target_arch = 
"x86"), - link_name = "pthread_cond_wait$UNIX2003")] - pub fn pthread_cond_wait(cond: *mut pthread_cond_t, - lock: *mut pthread_mutex_t) -> ::c_int; - #[cfg_attr(all(target_os = "macos", target_arch = "x86"), - link_name = "pthread_cond_timedwait$UNIX2003")] - pub fn pthread_cond_timedwait(cond: *mut pthread_cond_t, - lock: *mut pthread_mutex_t, - abstime: *const ::timespec) -> ::c_int; - pub fn pthread_cond_signal(cond: *mut pthread_cond_t) -> ::c_int; - pub fn pthread_cond_broadcast(cond: *mut pthread_cond_t) -> ::c_int; - pub fn pthread_cond_destroy(cond: *mut pthread_cond_t) -> ::c_int; - pub fn pthread_condattr_init(attr: *mut pthread_condattr_t) -> ::c_int; - pub fn pthread_condattr_destroy(attr: *mut pthread_condattr_t) -> ::c_int; - #[cfg_attr(all(target_os = "macos", target_arch = "x86"), - link_name = "pthread_rwlock_destroy$UNIX2003")] - pub fn pthread_rwlock_destroy(lock: *mut pthread_rwlock_t) -> ::c_int; - #[cfg_attr(all(target_os = "macos", target_arch = "x86"), - link_name = "pthread_rwlock_rdlock$UNIX2003")] - pub fn pthread_rwlock_rdlock(lock: *mut pthread_rwlock_t) -> ::c_int; - #[cfg_attr(all(target_os = "macos", target_arch = "x86"), - link_name = "pthread_rwlock_tryrdlock$UNIX2003")] - pub fn pthread_rwlock_tryrdlock(lock: *mut pthread_rwlock_t) -> ::c_int; - #[cfg_attr(all(target_os = "macos", target_arch = "x86"), - link_name = "pthread_rwlock_wrlock$UNIX2003")] - pub fn pthread_rwlock_wrlock(lock: *mut pthread_rwlock_t) -> ::c_int; - #[cfg_attr(all(target_os = "macos", target_arch = "x86"), - link_name = "pthread_rwlock_trywrlock$UNIX2003")] - pub fn pthread_rwlock_trywrlock(lock: *mut pthread_rwlock_t) -> ::c_int; - #[cfg_attr(all(target_os = "macos", target_arch = "x86"), - link_name = "pthread_rwlock_unlock$UNIX2003")] - pub fn pthread_rwlock_unlock(lock: *mut pthread_rwlock_t) -> ::c_int; - #[cfg_attr(all(target_os = "macos", target_arch = "x86"), - link_name = "pthread_sigmask$UNIX2003")] - pub fn pthread_sigmask(how: ::c_int, set: 
*const sigset_t, - oldset: *mut sigset_t) -> ::c_int; - pub fn pthread_kill(thread: ::pthread_t, sig: ::c_int) -> ::c_int; - #[cfg_attr(all(target_os = "linux", not(target_env = "musl")), - link_name = "__xpg_strerror_r")] - pub fn strerror_r(errnum: ::c_int, buf: *mut c_char, - buflen: ::size_t) -> ::c_int; - - pub fn getsockopt(sockfd: ::c_int, - level: ::c_int, - optname: ::c_int, - optval: *mut ::c_void, - optlen: *mut ::socklen_t) -> ::c_int; - pub fn raise(signum: ::c_int) -> ::c_int; - #[cfg_attr(target_os = "netbsd", link_name = "__sigaction14")] - pub fn sigaction(signum: ::c_int, - act: *const sigaction, - oldact: *mut sigaction) -> ::c_int; - #[cfg_attr(all(target_os = "macos", target_arch = "x86"), - link_name = "sigaltstack$UNIX2003")] - #[cfg_attr(target_os = "netbsd", link_name = "__sigaltstack14")] - pub fn sigaltstack(ss: *const stack_t, - oss: *mut stack_t) -> ::c_int; - #[cfg_attr(all(target_os = "macos", target_arch ="x86"), - link_name = "sigwait$UNIX2003")] - pub fn sigwait(set: *const sigset_t, - sig: *mut ::c_int) -> ::c_int; - - #[cfg_attr(target_os = "netbsd", link_name = "__utimes50")] - pub fn utimes(filename: *const ::c_char, - times: *const ::timeval) -> ::c_int; - pub fn dlopen(filename: *const ::c_char, - flag: ::c_int) -> *mut ::c_void; - pub fn dlerror() -> *mut ::c_char; - pub fn dlsym(handle: *mut ::c_void, - symbol: *const ::c_char) -> *mut ::c_void; - pub fn dlclose(handle: *mut ::c_void) -> ::c_int; - pub fn dladdr(addr: *const ::c_void, info: *mut Dl_info) -> ::c_int; - - pub fn getaddrinfo(node: *const c_char, - service: *const c_char, - hints: *const addrinfo, - res: *mut *mut addrinfo) -> ::c_int; - pub fn freeaddrinfo(res: *mut addrinfo); - pub fn gai_strerror(errcode: ::c_int) -> *const ::c_char; - - #[cfg_attr(target_os = "netbsd", link_name = "__gmtime_r50")] - pub fn gmtime_r(time_p: *const time_t, result: *mut tm) -> *mut tm; - #[cfg_attr(target_os = "netbsd", link_name = "__localtime_r50")] - pub fn 
localtime_r(time_p: *const time_t, result: *mut tm) -> *mut tm; - #[cfg_attr(all(target_os = "macos", target_arch = "x86"), - link_name = "mktime$UNIX2003")] - #[cfg_attr(target_os = "netbsd", link_name = "__mktime50")] - pub fn mktime(tm: *mut tm) -> time_t; - #[cfg_attr(target_os = "netbsd", link_name = "__time50")] - pub fn time(time: *mut time_t) -> time_t; - #[cfg_attr(target_os = "netbsd", link_name = "__locatime50")] - pub fn localtime(time: *const time_t) -> *mut tm; - - #[cfg_attr(target_os = "netbsd", link_name = "__mknod50")] - pub fn mknod(pathname: *const ::c_char, mode: ::mode_t, - dev: ::dev_t) -> ::c_int; - #[cfg_attr(all(target_os = "macos", target_arch = "x86"), - link_name = "writev$UNIX2003")] - pub fn writev(fd: ::c_int, - iov: *const ::iovec, - iovcnt: ::c_int) -> ::ssize_t; - #[cfg_attr(all(target_os = "macos", target_arch = "x86"), - link_name = "readv$UNIX2003")] - pub fn readv(fd: ::c_int, - iov: *const ::iovec, - iovcnt: ::c_int) -> ::ssize_t; - pub fn uname(buf: *mut ::utsname) -> ::c_int; - pub fn daemon(nochdir: ::c_int, noclose: ::c_int) -> ::c_int; - pub fn gethostname(name: *mut ::c_char, len: ::size_t) -> ::c_int; - pub fn chroot(name: *const ::c_char) -> ::c_int; - #[cfg_attr(all(target_os = "macos", target_arch = "x86"), - link_name = "usleep$UNIX2003")] - pub fn usleep(secs: ::c_uint) -> ::c_int; - #[cfg_attr(all(target_os = "macos", target_arch = "x86"), - link_name = "send$UNIX2003")] - pub fn send(socket: ::c_int, buf: *const ::c_void, len: ::size_t, - flags: ::c_int) -> ::ssize_t; - #[cfg_attr(all(target_os = "macos", target_arch = "x86"), - link_name = "recv$UNIX2003")] - pub fn recv(socket: ::c_int, buf: *mut ::c_void, len: ::size_t, - flags: ::c_int) -> ::ssize_t; - #[cfg_attr(all(target_os = "macos", target_arch = "x86"), - link_name = "putenv$UNIX2003")] - #[cfg_attr(target_os = "netbsd", link_name = "__putenv50")] - pub fn putenv(string: *mut c_char) -> ::c_int; - #[cfg_attr(all(target_os = "macos", target_arch = 
"x86"), - link_name = "sendmsg$UNIX2003")] - pub fn sendmsg(fd: ::c_int, - msg: *const msghdr, - flags: ::c_int) -> ::ssize_t; - #[cfg_attr(all(target_os = "macos", target_arch = "x86"), - link_name = "recvmsg$UNIX2003")] - pub fn recvmsg(fd: ::c_int, msg: *mut msghdr, flags: ::c_int) -> ::ssize_t; - #[cfg_attr(all(target_os = "macos", target_arch = "x86"), - link_name = "poll$UNIX2003")] - pub fn poll(fds: *mut pollfd, nfds: nfds_t, timeout: ::c_int) -> ::c_int; - #[cfg_attr(all(target_os = "macos", target_arch = "x86_64"), - link_name = "select$1050")] - #[cfg_attr(all(target_os = "macos", target_arch = "x86"), - link_name = "select$UNIX2003")] - #[cfg_attr(target_os = "netbsd", link_name = "__select50")] - pub fn select(nfds: ::c_int, - readfs: *mut fd_set, - writefds: *mut fd_set, - errorfds: *mut fd_set, - timeout: *mut timeval) -> ::c_int; - #[cfg_attr(target_os = "netbsd", link_name = "__setlocale50")] - pub fn setlocale(category: ::c_int, - locale: *const ::c_char) -> *mut ::c_char; - pub fn localeconv() -> *mut lconv; - - pub fn sem_destroy(sem: *mut sem_t) -> ::c_int; - pub fn sem_open(name: *const ::c_char, oflag: ::c_int, ...) 
-> *mut sem_t; - pub fn sem_close(sem: *mut sem_t) -> ::c_int; - pub fn sem_unlink(name: *const ::c_char) -> ::c_int; - #[cfg_attr(all(target_os = "macos", target_arch = "x86"), - link_name = "sem_wait$UNIX2003")] - pub fn sem_wait(sem: *mut sem_t) -> ::c_int; - pub fn sem_trywait(sem: *mut sem_t) -> ::c_int; - pub fn sem_post(sem: *mut sem_t) -> ::c_int; - pub fn sem_init(sem: *mut sem_t, - pshared: ::c_int, - value: ::c_uint) - -> ::c_int; -} - -// TODO: get rid of this cfg(not(...)) -#[cfg(not(target_os = "android"))] // " if " -- appease style checker -extern { - #[cfg_attr(target_os = "macos", link_name = "glob$INODE64")] - #[cfg_attr(target_os = "netbsd", link_name = "__glob30")] - pub fn glob(pattern: *const c_char, - flags: ::c_int, - errfunc: Option ::c_int>, - pglob: *mut glob_t) -> ::c_int; - #[cfg_attr(target_os = "netbsd", link_name = "__globfree30")] - pub fn globfree(pglob: *mut glob_t); - - pub fn posix_madvise(addr: *mut ::c_void, len: ::size_t, advice: ::c_int) - -> ::c_int; - - pub fn shm_unlink(name: *const c_char) -> ::c_int; - - #[cfg_attr(all(target_os = "macos", target_arch = "x86_64"), - link_name = "seekdir$INODE64")] - #[cfg_attr(all(target_os = "macos", target_arch = "x86"), - link_name = "seekdir$INODE64$UNIX2003")] - pub fn seekdir(dirp: *mut ::DIR, loc: c_long); - - #[cfg_attr(all(target_os = "macos", target_arch = "x86_64"), - link_name = "telldir$INODE64")] - #[cfg_attr(all(target_os = "macos", target_arch = "x86"), - link_name = "telldir$INODE64$UNIX2003")] - pub fn telldir(dirp: *mut ::DIR) -> c_long; - - pub fn getsid(pid: pid_t) -> pid_t; - pub fn madvise(addr: *mut ::c_void, len: ::size_t, advice: ::c_int) - -> ::c_int; - pub fn readlink(path: *const c_char, - buf: *mut c_char, - bufsz: ::size_t) - -> ::ssize_t; - - #[cfg_attr(all(target_os = "macos", target_arch = "x86"), - link_name = "msync$UNIX2003")] - #[cfg_attr(target_os = "netbsd", link_name = "__msync13")] - pub fn msync(addr: *mut ::c_void, len: ::size_t, flags: 
::c_int) -> ::c_int; - pub fn sysconf(name: ::c_int) -> c_long; - #[cfg_attr(all(target_os = "macos", target_arch = "x86"), - link_name = "recvfrom$UNIX2003")] - pub fn recvfrom(socket: ::c_int, buf: *mut ::c_void, len: ::size_t, - flags: ::c_int, addr: *mut sockaddr, - addrlen: *mut socklen_t) -> ::ssize_t; - pub fn mkfifo(path: *const c_char, mode: mode_t) -> ::c_int; - - #[cfg_attr(target_os = "netbsd", link_name = "__sigemptyset14")] - pub fn sigemptyset(set: *mut sigset_t) -> ::c_int; - #[cfg_attr(target_os = "netbsd", link_name = "__sigaddset14")] - pub fn sigaddset(set: *mut sigset_t, signum: ::c_int) -> ::c_int; - #[cfg_attr(target_os = "netbsd", link_name = "__sigfillset14")] - pub fn sigfillset(set: *mut sigset_t) -> ::c_int; - #[cfg_attr(target_os = "netbsd", link_name = "__sigdelset14")] - pub fn sigdelset(set: *mut sigset_t, signum: ::c_int) -> ::c_int; - #[cfg_attr(target_os = "netbsd", link_name = "__sigismember14")] - pub fn sigismember(set: *const sigset_t, signum: ::c_int) -> ::c_int; - #[cfg_attr(all(target_os = "macos", target_arch = "x86_64"), - link_name = "pselect$1050")] - #[cfg_attr(all(target_os = "macos", target_arch = "x86"), - link_name = "pselect$UNIX2003")] - #[cfg_attr(target_os = "netbsd", link_name = "__pselect50")] - pub fn pselect(nfds: ::c_int, - readfs: *mut fd_set, - writefds: *mut fd_set, - errorfds: *mut fd_set, - timeout: *const timespec, - sigmask: *const sigset_t) -> ::c_int; - pub fn fseeko(stream: *mut ::FILE, - offset: ::off_t, - whence: ::c_int) -> ::c_int; - pub fn ftello(stream: *mut ::FILE) -> ::off_t; - #[cfg_attr(target_os = "netbsd", link_name = "__timegm50")] - pub fn timegm(tm: *mut ::tm) -> time_t; - pub fn statvfs(path: *const c_char, buf: *mut statvfs) -> ::c_int; - pub fn fstatvfs(fd: ::c_int, buf: *mut statvfs) -> ::c_int; - #[cfg_attr(all(target_os = "macos", target_arch = "x86"), - link_name = "tcdrain$UNIX2003")] - pub fn tcdrain(fd: ::c_int) -> ::c_int; - pub fn cfgetispeed(termios: *const ::termios) 
-> ::speed_t; - pub fn cfgetospeed(termios: *const ::termios) -> ::speed_t; - pub fn cfsetispeed(termios: *mut ::termios, speed: ::speed_t) -> ::c_int; - pub fn cfsetospeed(termios: *mut ::termios, speed: ::speed_t) -> ::c_int; - pub fn tcgetattr(fd: ::c_int, termios: *mut ::termios) -> ::c_int; - pub fn tcsetattr(fd: ::c_int, - optional_actions: ::c_int, - termios: *const ::termios) -> ::c_int; - pub fn tcflow(fd: ::c_int, action: ::c_int) -> ::c_int; - pub fn tcflush(fd: ::c_int, action: ::c_int) -> ::c_int; - pub fn tcsendbreak(fd: ::c_int, duration: ::c_int) -> ::c_int; - pub fn mkstemp(template: *mut ::c_char) -> ::c_int; - pub fn mkstemps(template: *mut ::c_char, suffixlen: ::c_int) -> ::c_int; - pub fn mkdtemp(template: *mut ::c_char) -> *mut ::c_char; - pub fn futimes(fd: ::c_int, times: *const ::timeval) -> ::c_int; - pub fn nl_langinfo(item: ::nl_item) -> *mut ::c_char; - - pub fn openlog(ident: *const ::c_char, logopt: ::c_int, facility: ::c_int); - pub fn closelog(); - pub fn setlogmask(maskpri: ::c_int) -> ::c_int; - pub fn syslog(priority: ::c_int, message: *const ::c_char, ...); - #[cfg_attr(all(target_os = "macos", target_arch = "x86"), - link_name = "nice$UNIX2003")] - pub fn nice(incr: ::c_int) -> ::c_int; - - pub fn grantpt(fd: ::c_int) -> ::c_int; - pub fn posix_openpt(flags: ::c_int) -> ::c_int; - pub fn ptsname(fd: ::c_int) -> *mut ::c_char; - pub fn unlockpt(fd: ::c_int) -> ::c_int; -} - -cfg_if! 
{ - if #[cfg(any(target_os = "linux", - target_os = "android", - target_os = "emscripten", - target_os = "fuchsia"))] { - mod notbsd; - pub use self::notbsd::*; - } else if #[cfg(any(target_os = "macos", - target_os = "ios", - target_os = "freebsd", - target_os = "dragonfly", - target_os = "openbsd", - target_os = "netbsd", - target_os = "bitrig"))] { - mod bsd; - pub use self::bsd::*; - } else if #[cfg(target_os = "solaris")] { - mod solaris; - pub use self::solaris::*; - } else if #[cfg(target_os = "haiku")] { - mod haiku; - pub use self::haiku::*; - } else { - // Unknown target_os - } -} diff -Nru cargo-0.17.0/vendor/libc-0.2.18/src/unix/notbsd/android/b32.rs cargo-0.19.0/vendor/libc-0.2.18/src/unix/notbsd/android/b32.rs --- cargo-0.17.0/vendor/libc-0.2.18/src/unix/notbsd/android/b32.rs 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/libc-0.2.18/src/unix/notbsd/android/b32.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,151 +0,0 @@ -pub type c_long = i32; -pub type c_ulong = u32; -pub type mode_t = u16; -pub type off64_t = ::c_longlong; - -s! 
{ - pub struct sigaction { - pub sa_sigaction: ::sighandler_t, - pub sa_mask: ::sigset_t, - pub sa_flags: ::c_ulong, - pub sa_restorer: ::dox::Option, - } - - pub struct stat { - pub st_dev: ::c_ulonglong, - __pad0: [::c_uchar; 4], - __st_ino: ::ino_t, - pub st_mode: ::c_uint, - pub st_nlink: ::c_uint, - pub st_uid: ::uid_t, - pub st_gid: ::gid_t, - pub st_rdev: ::c_ulonglong, - __pad3: [::c_uchar; 4], - pub st_size: ::c_longlong, - pub st_blksize: ::blksize_t, - pub st_blocks: ::c_ulonglong, - pub st_atime: ::c_ulong, - pub st_atime_nsec: ::c_ulong, - pub st_mtime: ::c_ulong, - pub st_mtime_nsec: ::c_ulong, - pub st_ctime: ::c_ulong, - pub st_ctime_nsec: ::c_ulong, - pub st_ino: ::c_ulonglong, - } - - pub struct stat64 { - pub st_dev: ::c_ulonglong, - __pad0: [::c_uchar; 4], - __st_ino: ::ino_t, - pub st_mode: ::c_uint, - pub st_nlink: ::c_uint, - pub st_uid: ::uid_t, - pub st_gid: ::gid_t, - pub st_rdev: ::c_ulonglong, - __pad3: [::c_uchar; 4], - pub st_size: ::c_longlong, - pub st_blksize: ::blksize_t, - pub st_blocks: ::c_ulonglong, - pub st_atime: ::c_ulong, - pub st_atime_nsec: ::c_ulong, - pub st_mtime: ::c_ulong, - pub st_mtime_nsec: ::c_ulong, - pub st_ctime: ::c_ulong, - pub st_ctime_nsec: ::c_ulong, - pub st_ino: ::c_ulonglong, - } - - pub struct pthread_attr_t { - pub flags: ::uint32_t, - pub stack_base: *mut ::c_void, - pub stack_size: ::size_t, - pub guard_size: ::size_t, - pub sched_policy: ::int32_t, - pub sched_priority: ::int32_t, - } - - pub struct pthread_mutex_t { value: ::c_int } - - pub struct pthread_cond_t { value: ::c_int } - - pub struct pthread_rwlock_t { - lock: pthread_mutex_t, - cond: pthread_cond_t, - numLocks: ::c_int, - writerThreadId: ::c_int, - pendingReaders: ::c_int, - pendingWriters: ::c_int, - attr: i32, - __reserved: [::c_char; 12], - } - - pub struct passwd { - pub pw_name: *mut ::c_char, - pub pw_passwd: *mut ::c_char, - pub pw_uid: ::uid_t, - pub pw_gid: ::gid_t, - pub pw_dir: *mut ::c_char, - pub pw_shell: *mut ::c_char, 
- } - - pub struct statfs { - pub f_type: ::uint32_t, - pub f_bsize: ::uint32_t, - pub f_blocks: ::uint64_t, - pub f_bfree: ::uint64_t, - pub f_bavail: ::uint64_t, - pub f_files: ::uint64_t, - pub f_ffree: ::uint64_t, - pub f_fsid: ::__fsid_t, - pub f_namelen: ::uint32_t, - pub f_frsize: ::uint32_t, - pub f_flags: ::uint32_t, - pub f_spare: [::uint32_t; 4], - } - - pub struct sysinfo { - pub uptime: ::c_long, - pub loads: [::c_ulong; 3], - pub totalram: ::c_ulong, - pub freeram: ::c_ulong, - pub sharedram: ::c_ulong, - pub bufferram: ::c_ulong, - pub totalswap: ::c_ulong, - pub freeswap: ::c_ulong, - pub procs: ::c_ushort, - pub pad: ::c_ushort, - pub totalhigh: ::c_ulong, - pub freehigh: ::c_ulong, - pub mem_unit: ::c_uint, - pub _f: [::c_char; 8], - } -} - -pub const SYS_gettid: ::c_long = 224; -pub const PTHREAD_MUTEX_INITIALIZER: pthread_mutex_t = pthread_mutex_t { - value: 0, -}; -pub const PTHREAD_COND_INITIALIZER: pthread_cond_t = pthread_cond_t { - value: 0, -}; -pub const PTHREAD_RWLOCK_INITIALIZER: pthread_rwlock_t = pthread_rwlock_t { - lock: PTHREAD_MUTEX_INITIALIZER, - cond: PTHREAD_COND_INITIALIZER, - numLocks: 0, - writerThreadId: 0, - pendingReaders: 0, - pendingWriters: 0, - attr: 0, - __reserved: [0; 12], -}; -pub const PTHREAD_STACK_MIN: ::size_t = 4096 * 2; -pub const CPU_SETSIZE: ::size_t = 32; -pub const __CPU_BITS: ::size_t = 32; - -pub const UT_LINESIZE: usize = 8; -pub const UT_NAMESIZE: usize = 8; -pub const UT_HOSTSIZE: usize = 16; - -extern { - pub fn timegm64(tm: *const ::tm) -> ::time64_t; -} diff -Nru cargo-0.17.0/vendor/libc-0.2.18/src/unix/notbsd/android/b64.rs cargo-0.19.0/vendor/libc-0.2.18/src/unix/notbsd/android/b64.rs --- cargo-0.17.0/vendor/libc-0.2.18/src/unix/notbsd/android/b64.rs 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/libc-0.2.18/src/unix/notbsd/android/b64.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,161 +0,0 @@ -// The following definitions are correct for aarch64 and may be wrong for x86_64 - -pub 
type c_long = i64; -pub type c_ulong = u64; -pub type mode_t = u32; -pub type off64_t = i64; - -s! { - pub struct sigaction { - pub sa_flags: ::c_uint, - pub sa_sigaction: ::sighandler_t, - pub sa_mask: ::sigset_t, - _restorer: *mut ::c_void, - } - - pub struct stat { - pub st_dev: ::dev_t, - pub st_ino: ::ino_t, - pub st_mode: ::c_uint, - pub st_nlink: ::c_uint, - pub st_uid: ::uid_t, - pub st_gid: ::gid_t, - pub st_rdev: ::dev_t, - __pad1: ::c_ulong, - pub st_size: ::off64_t, - pub st_blksize: ::c_int, - __pad2: ::c_int, - pub st_blocks: ::c_long, - pub st_atime: ::time_t, - pub st_atime_nsec: ::c_ulong, - pub st_mtime: ::time_t, - pub st_mtime_nsec: ::c_ulong, - pub st_ctime: ::time_t, - pub st_ctime_nsec: ::c_ulong, - __unused4: ::c_uint, - __unused5: ::c_uint, - } - - pub struct stat64 { - pub st_dev: ::dev_t, - pub st_ino: ::ino_t, - pub st_mode: ::c_uint, - pub st_nlink: ::c_uint, - pub st_uid: ::uid_t, - pub st_gid: ::gid_t, - pub st_rdev: ::dev_t, - __pad1: ::c_ulong, - pub st_size: ::off64_t, - pub st_blksize: ::c_int, - __pad2: ::c_int, - pub st_blocks: ::c_long, - pub st_atime: ::time_t, - pub st_atime_nsec: ::c_ulong, - pub st_mtime: ::time_t, - pub st_mtime_nsec: ::c_ulong, - pub st_ctime: ::time_t, - pub st_ctime_nsec: ::c_ulong, - __unused4: ::c_uint, - __unused5: ::c_uint, - } - - pub struct pthread_attr_t { - pub flags: ::uint32_t, - pub stack_base: *mut ::c_void, - pub stack_size: ::size_t, - pub guard_size: ::size_t, - pub sched_policy: ::int32_t, - pub sched_priority: ::int32_t, - __reserved: [::c_char; 16], - } - - pub struct pthread_mutex_t { - value: ::c_int, - __reserved: [::c_char; 36], - } - - pub struct pthread_cond_t { - value: ::c_int, - __reserved: [::c_char; 44], - } - - pub struct pthread_rwlock_t { - numLocks: ::c_int, - writerThreadId: ::c_int, - pendingReaders: ::c_int, - pendingWriters: ::c_int, - attr: i32, - __reserved: [::c_char; 36], - } - - pub struct passwd { - pub pw_name: *mut ::c_char, - pub pw_passwd: *mut ::c_char, - 
pub pw_uid: ::uid_t, - pub pw_gid: ::gid_t, - pub pw_gecos: *mut ::c_char, - pub pw_dir: *mut ::c_char, - pub pw_shell: *mut ::c_char, - } - - pub struct statfs { - pub f_type: ::uint64_t, - pub f_bsize: ::uint64_t, - pub f_blocks: ::uint64_t, - pub f_bfree: ::uint64_t, - pub f_bavail: ::uint64_t, - pub f_files: ::uint64_t, - pub f_ffree: ::uint64_t, - pub f_fsid: ::__fsid_t, - pub f_namelen: ::uint64_t, - pub f_frsize: ::uint64_t, - pub f_flags: ::uint64_t, - pub f_spare: [::uint64_t; 4], - } - - pub struct sysinfo { - pub uptime: ::c_long, - pub loads: [::c_ulong; 3], - pub totalram: ::c_ulong, - pub freeram: ::c_ulong, - pub sharedram: ::c_ulong, - pub bufferram: ::c_ulong, - pub totalswap: ::c_ulong, - pub freeswap: ::c_ulong, - pub procs: ::c_ushort, - pub pad: ::c_ushort, - pub totalhigh: ::c_ulong, - pub freehigh: ::c_ulong, - pub mem_unit: ::c_uint, - pub _f: [::c_char; 0], - } -} - -pub const SYS_gettid: ::c_long = 178; -pub const PTHREAD_MUTEX_INITIALIZER: pthread_mutex_t = pthread_mutex_t { - value: 0, - __reserved: [0; 36], -}; -pub const PTHREAD_COND_INITIALIZER: pthread_cond_t = pthread_cond_t { - value: 0, - __reserved: [0; 44], -}; -pub const PTHREAD_RWLOCK_INITIALIZER: pthread_rwlock_t = pthread_rwlock_t { - numLocks: 0, - writerThreadId: 0, - pendingReaders: 0, - pendingWriters: 0, - attr: 0, - __reserved: [0; 36], -}; -pub const PTHREAD_STACK_MIN: ::size_t = 4096 * 4; -pub const CPU_SETSIZE: ::size_t = 1024; -pub const __CPU_BITS: ::size_t = 64; - -pub const UT_LINESIZE: usize = 32; -pub const UT_NAMESIZE: usize = 32; -pub const UT_HOSTSIZE: usize = 256; - -extern { - pub fn timegm(tm: *const ::tm) -> ::time64_t; -} diff -Nru cargo-0.17.0/vendor/libc-0.2.18/src/unix/notbsd/android/mod.rs cargo-0.19.0/vendor/libc-0.2.18/src/unix/notbsd/android/mod.rs --- cargo-0.17.0/vendor/libc-0.2.18/src/unix/notbsd/android/mod.rs 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/libc-0.2.18/src/unix/notbsd/android/mod.rs 1970-01-01 00:00:00.000000000 
+0000 @@ -1,782 +0,0 @@ -//! Android-specific definitions for linux-like values - -use dox::mem; - -pub type c_char = u8; -pub type clock_t = ::c_long; -pub type time_t = ::c_long; -pub type suseconds_t = ::c_long; -pub type wchar_t = u32; -pub type off_t = ::c_long; -pub type blkcnt_t = ::c_ulong; -pub type blksize_t = ::c_ulong; -pub type nlink_t = u32; -pub type useconds_t = u32; -pub type socklen_t = i32; -pub type pthread_t = ::c_long; -pub type pthread_mutexattr_t = ::c_long; -pub type pthread_condattr_t = ::c_long; -pub type sigset_t = ::c_ulong; -pub type time64_t = i64; // N/A on android -pub type fsfilcnt_t = ::c_ulong; -pub type fsblkcnt_t = ::c_ulong; -pub type nfds_t = ::c_uint; -pub type rlim_t = ::c_ulong; -pub type dev_t = ::c_ulong; -pub type ino_t = ::c_ulong; -pub type __CPU_BITTYPE = ::c_ulong; - -s! { - pub struct dirent { - pub d_ino: u64, - pub d_off: i64, - pub d_reclen: ::c_ushort, - pub d_type: ::c_uchar, - pub d_name: [::c_char; 256], - } - - pub struct dirent64 { - pub d_ino: u64, - pub d_off: i64, - pub d_reclen: ::c_ushort, - pub d_type: ::c_uchar, - pub d_name: [::c_char; 256], - } - - pub struct rlimit64 { - pub rlim_cur: u64, - pub rlim_max: u64, - } - - pub struct stack_t { - pub ss_sp: *mut ::c_void, - pub ss_flags: ::c_int, - pub ss_size: ::size_t - } - - pub struct siginfo_t { - pub si_signo: ::c_int, - pub si_errno: ::c_int, - pub si_code: ::c_int, - pub _pad: [::c_int; 29], - } - - pub struct __fsid_t { - __val: [::c_int; 2], - } - - pub struct msghdr { - pub msg_name: *mut ::c_void, - pub msg_namelen: ::c_int, - pub msg_iov: *mut ::iovec, - pub msg_iovlen: ::size_t, - pub msg_control: *mut ::c_void, - pub msg_controllen: ::size_t, - pub msg_flags: ::c_int, - } - - pub struct termios { - pub c_iflag: ::tcflag_t, - pub c_oflag: ::tcflag_t, - pub c_cflag: ::tcflag_t, - pub c_lflag: ::tcflag_t, - pub c_line: ::cc_t, - pub c_cc: [::cc_t; ::NCCS], - } - - pub struct flock { - pub l_type: ::c_short, - pub l_whence: ::c_short, - pub 
l_start: ::off_t, - pub l_len: ::off_t, - pub l_pid: ::pid_t, - } - - pub struct cpu_set_t { - #[cfg(target_pointer_width = "64")] - __bits: [__CPU_BITTYPE; 16], - #[cfg(target_pointer_width = "32")] - __bits: [__CPU_BITTYPE; 1], - } - - pub struct sem_t { - count: ::c_uint, - } - - pub struct lastlog { - ll_time: ::time_t, - ll_line: [::c_char; UT_LINESIZE], - ll_host: [::c_char; UT_HOSTSIZE], - } - - pub struct exit_status { - pub e_termination: ::c_short, - pub e_exit: ::c_short, - } - - pub struct utmp { - pub ut_type: ::c_short, - pub ut_pid: ::pid_t, - pub ut_line: [::c_char; UT_LINESIZE], - pub ut_id: [::c_char; 4], - - pub ut_user: [::c_char; UT_NAMESIZE], - pub ut_host: [::c_char; UT_HOSTSIZE], - pub ut_exit: exit_status, - pub ut_session: ::c_long, - pub ut_tv: ::timeval, - - pub ut_addr_v6: [::int32_t; 4], - unused: [::c_char; 20], - } -} - -pub const USER_PROCESS: ::c_short = 7; - -pub const BUFSIZ: ::c_uint = 1024; -pub const FILENAME_MAX: ::c_uint = 1024; -pub const FOPEN_MAX: ::c_uint = 20; -pub const POSIX_FADV_DONTNEED: ::c_int = 4; -pub const POSIX_FADV_NOREUSE: ::c_int = 5; -pub const L_tmpnam: ::c_uint = 1024; -pub const TMP_MAX: ::c_uint = 308915776; -pub const _PC_LINK_MAX: ::c_int = 1; -pub const _PC_MAX_CANON: ::c_int = 2; -pub const _PC_MAX_INPUT: ::c_int = 3; -pub const _PC_NAME_MAX: ::c_int = 4; -pub const _PC_PATH_MAX: ::c_int = 5; -pub const _PC_PIPE_BUF: ::c_int = 6; -pub const _PC_CHOWN_RESTRICTED: ::c_int = 14; -pub const _PC_NO_TRUNC: ::c_int = 15; -pub const _PC_VDISABLE: ::c_int = 16; - -pub const FIONBIO: ::c_int = 0x5421; - -pub const _SC_ARG_MAX: ::c_int = 0; -pub const _SC_BC_BASE_MAX: ::c_int = 1; -pub const _SC_BC_DIM_MAX: ::c_int = 2; -pub const _SC_BC_SCALE_MAX: ::c_int = 3; -pub const _SC_BC_STRING_MAX: ::c_int = 4; -pub const _SC_CHILD_MAX: ::c_int = 5; -pub const _SC_CLK_TCK: ::c_int = 6; -pub const _SC_COLL_WEIGHTS_MAX: ::c_int = 7; -pub const _SC_EXPR_NEST_MAX: ::c_int = 8; -pub const _SC_LINE_MAX: ::c_int = 9; -pub 
const _SC_NGROUPS_MAX: ::c_int = 10; -pub const _SC_OPEN_MAX: ::c_int = 11; -pub const _SC_2_C_BIND: ::c_int = 13; -pub const _SC_2_C_DEV: ::c_int = 14; -pub const _SC_2_C_VERSION: ::c_int = 15; -pub const _SC_2_CHAR_TERM: ::c_int = 16; -pub const _SC_2_FORT_DEV: ::c_int = 17; -pub const _SC_2_FORT_RUN: ::c_int = 18; -pub const _SC_2_LOCALEDEF: ::c_int = 19; -pub const _SC_2_SW_DEV: ::c_int = 20; -pub const _SC_2_UPE: ::c_int = 21; -pub const _SC_2_VERSION: ::c_int = 22; -pub const _SC_JOB_CONTROL: ::c_int = 23; -pub const _SC_SAVED_IDS: ::c_int = 24; -pub const _SC_VERSION: ::c_int = 25; -pub const _SC_RE_DUP_MAX: ::c_int = 26; -pub const _SC_STREAM_MAX: ::c_int = 27; -pub const _SC_TZNAME_MAX: ::c_int = 28; -pub const _SC_XOPEN_CRYPT: ::c_int = 29; -pub const _SC_XOPEN_ENH_I18N: ::c_int = 30; -pub const _SC_XOPEN_SHM: ::c_int = 31; -pub const _SC_XOPEN_VERSION: ::c_int = 32; -pub const _SC_XOPEN_XCU_VERSION: ::c_int = 33; -pub const _SC_XOPEN_REALTIME: ::c_int = 34; -pub const _SC_XOPEN_REALTIME_THREADS: ::c_int = 35; -pub const _SC_XOPEN_LEGACY: ::c_int = 36; -pub const _SC_ATEXIT_MAX: ::c_int = 37; -pub const _SC_IOV_MAX: ::c_int = 38; -pub const _SC_PAGESIZE: ::c_int = 39; -pub const _SC_PAGE_SIZE: ::c_int = 40; -pub const _SC_XOPEN_UNIX: ::c_int = 41; -pub const _SC_MQ_PRIO_MAX: ::c_int = 51; -pub const _SC_GETGR_R_SIZE_MAX: ::c_int = 71; -pub const _SC_GETPW_R_SIZE_MAX: ::c_int = 72; -pub const _SC_LOGIN_NAME_MAX: ::c_int = 73; -pub const _SC_THREAD_DESTRUCTOR_ITERATIONS: ::c_int = 74; -pub const _SC_THREAD_KEYS_MAX: ::c_int = 75; -pub const _SC_THREAD_STACK_MIN: ::c_int = 76; -pub const _SC_THREAD_THREADS_MAX: ::c_int = 77; -pub const _SC_TTY_NAME_MAX: ::c_int = 78; -pub const _SC_THREADS: ::c_int = 79; -pub const _SC_THREAD_ATTR_STACKADDR: ::c_int = 80; -pub const _SC_THREAD_ATTR_STACKSIZE: ::c_int = 81; -pub const _SC_THREAD_PRIORITY_SCHEDULING: ::c_int = 82; -pub const _SC_THREAD_PRIO_INHERIT: ::c_int = 83; -pub const _SC_THREAD_PRIO_PROTECT: ::c_int = 
84; -pub const _SC_THREAD_SAFE_FUNCTIONS: ::c_int = 85; -pub const _SC_NPROCESSORS_ONLN: ::c_int = 97; - -pub const PTHREAD_MUTEX_NORMAL: ::c_int = 0; -pub const PTHREAD_MUTEX_RECURSIVE: ::c_int = 1; -pub const PTHREAD_MUTEX_ERRORCHECK: ::c_int = 2; -pub const PTHREAD_MUTEX_DEFAULT: ::c_int = PTHREAD_MUTEX_NORMAL; - -pub const FIOCLEX: ::c_int = 0x5451; - -pub const SA_ONSTACK: ::c_ulong = 0x08000000; -pub const SA_SIGINFO: ::c_ulong = 0x00000004; -pub const SA_NOCLDWAIT: ::c_ulong = 0x00000002; - -pub const SIGCHLD: ::c_int = 17; -pub const SIGBUS: ::c_int = 7; -pub const SIGUSR1: ::c_int = 10; -pub const SIGUSR2: ::c_int = 12; -pub const SIGCONT: ::c_int = 18; -pub const SIGSTOP: ::c_int = 19; -pub const SIGTSTP: ::c_int = 20; -pub const SIGURG: ::c_int = 23; -pub const SIGIO: ::c_int = 29; -pub const SIGSYS: ::c_int = 31; -pub const SIGSTKFLT: ::c_int = 16; -pub const SIGUNUSED: ::c_int = 31; -pub const SIGTTIN: ::c_int = 21; -pub const SIGTTOU: ::c_int = 22; -pub const SIGXCPU: ::c_int = 24; -pub const SIGXFSZ: ::c_int = 25; -pub const SIGVTALRM: ::c_int = 26; -pub const SIGPROF: ::c_int = 27; -pub const SIGWINCH: ::c_int = 28; -pub const SIGPOLL: ::c_int = 29; -pub const SIGPWR: ::c_int = 30; -pub const SIG_SETMASK: ::c_int = 2; -pub const SIG_BLOCK: ::c_int = 0x000000; -pub const SIG_UNBLOCK: ::c_int = 0x01; - -pub const RUSAGE_CHILDREN: ::c_int = -1; - -pub const LC_PAPER: ::c_int = 7; -pub const LC_NAME: ::c_int = 8; -pub const LC_ADDRESS: ::c_int = 9; -pub const LC_TELEPHONE: ::c_int = 10; -pub const LC_MEASUREMENT: ::c_int = 11; -pub const LC_IDENTIFICATION: ::c_int = 12; -pub const LC_PAPER_MASK: ::c_int = (1 << LC_PAPER); -pub const LC_NAME_MASK: ::c_int = (1 << LC_NAME); -pub const LC_ADDRESS_MASK: ::c_int = (1 << LC_ADDRESS); -pub const LC_TELEPHONE_MASK: ::c_int = (1 << LC_TELEPHONE); -pub const LC_MEASUREMENT_MASK: ::c_int = (1 << LC_MEASUREMENT); -pub const LC_IDENTIFICATION_MASK: ::c_int = (1 << LC_IDENTIFICATION); -pub const LC_ALL_MASK: ::c_int 
= ::LC_CTYPE_MASK - | ::LC_NUMERIC_MASK - | ::LC_TIME_MASK - | ::LC_COLLATE_MASK - | ::LC_MONETARY_MASK - | ::LC_MESSAGES_MASK - | LC_PAPER_MASK - | LC_NAME_MASK - | LC_ADDRESS_MASK - | LC_TELEPHONE_MASK - | LC_MEASUREMENT_MASK - | LC_IDENTIFICATION_MASK; - -pub const MAP_ANON: ::c_int = 0x0020; -pub const MAP_ANONYMOUS: ::c_int = 0x0020; -pub const MAP_GROWSDOWN: ::c_int = 0x0100; -pub const MAP_DENYWRITE: ::c_int = 0x0800; -pub const MAP_EXECUTABLE: ::c_int = 0x01000; -pub const MAP_LOCKED: ::c_int = 0x02000; -pub const MAP_NORESERVE: ::c_int = 0x04000; -pub const MAP_POPULATE: ::c_int = 0x08000; -pub const MAP_NONBLOCK: ::c_int = 0x010000; -pub const MAP_STACK: ::c_int = 0x020000; - -pub const EDEADLK: ::c_int = 35; -pub const ENAMETOOLONG: ::c_int = 36; -pub const ENOLCK: ::c_int = 37; -pub const ENOSYS: ::c_int = 38; -pub const ENOTEMPTY: ::c_int = 39; -pub const ELOOP: ::c_int = 40; -pub const ENOMSG: ::c_int = 42; -pub const EIDRM: ::c_int = 43; -pub const ECHRNG: ::c_int = 44; -pub const EL2NSYNC: ::c_int = 45; -pub const EL3HLT: ::c_int = 46; -pub const EL3RST: ::c_int = 47; -pub const ELNRNG: ::c_int = 48; -pub const EUNATCH: ::c_int = 49; -pub const ENOCSI: ::c_int = 50; -pub const EL2HLT: ::c_int = 51; -pub const EBADE: ::c_int = 52; -pub const EBADR: ::c_int = 53; -pub const EXFULL: ::c_int = 54; -pub const ENOANO: ::c_int = 55; -pub const EBADRQC: ::c_int = 56; -pub const EBADSLT: ::c_int = 57; - -pub const EMULTIHOP: ::c_int = 72; -pub const EBADMSG: ::c_int = 74; -pub const EOVERFLOW: ::c_int = 75; -pub const ENOTUNIQ: ::c_int = 76; -pub const EBADFD: ::c_int = 77; -pub const EREMCHG: ::c_int = 78; -pub const ELIBACC: ::c_int = 79; -pub const ELIBBAD: ::c_int = 80; -pub const ELIBSCN: ::c_int = 81; -pub const ELIBMAX: ::c_int = 82; -pub const ELIBEXEC: ::c_int = 83; -pub const EILSEQ: ::c_int = 84; -pub const ERESTART: ::c_int = 85; -pub const ESTRPIPE: ::c_int = 86; -pub const EUSERS: ::c_int = 87; -pub const ENOTSOCK: ::c_int = 88; -pub const 
EDESTADDRREQ: ::c_int = 89; -pub const EMSGSIZE: ::c_int = 90; -pub const EPROTOTYPE: ::c_int = 91; -pub const ENOPROTOOPT: ::c_int = 92; -pub const EPROTONOSUPPORT: ::c_int = 93; -pub const ESOCKTNOSUPPORT: ::c_int = 94; -pub const EOPNOTSUPP: ::c_int = 95; -pub const ENOTSUP: ::c_int = EOPNOTSUPP; -pub const EPFNOSUPPORT: ::c_int = 96; -pub const EAFNOSUPPORT: ::c_int = 97; -pub const EADDRINUSE: ::c_int = 98; -pub const EADDRNOTAVAIL: ::c_int = 99; -pub const ENETDOWN: ::c_int = 100; -pub const ENETUNREACH: ::c_int = 101; -pub const ENETRESET: ::c_int = 102; -pub const ECONNABORTED: ::c_int = 103; -pub const ECONNRESET: ::c_int = 104; -pub const ENOBUFS: ::c_int = 105; -pub const EISCONN: ::c_int = 106; -pub const ENOTCONN: ::c_int = 107; -pub const ESHUTDOWN: ::c_int = 108; -pub const ETOOMANYREFS: ::c_int = 109; -pub const ETIMEDOUT: ::c_int = 110; -pub const ECONNREFUSED: ::c_int = 111; -pub const EHOSTDOWN: ::c_int = 112; -pub const EHOSTUNREACH: ::c_int = 113; -pub const EALREADY: ::c_int = 114; -pub const EINPROGRESS: ::c_int = 115; -pub const ESTALE: ::c_int = 116; -pub const EUCLEAN: ::c_int = 117; -pub const ENOTNAM: ::c_int = 118; -pub const ENAVAIL: ::c_int = 119; -pub const EISNAM: ::c_int = 120; -pub const EREMOTEIO: ::c_int = 121; -pub const EDQUOT: ::c_int = 122; -pub const ENOMEDIUM: ::c_int = 123; -pub const EMEDIUMTYPE: ::c_int = 124; -pub const ECANCELED: ::c_int = 125; -pub const ENOKEY: ::c_int = 126; -pub const EKEYEXPIRED: ::c_int = 127; -pub const EKEYREVOKED: ::c_int = 128; -pub const EKEYREJECTED: ::c_int = 129; -pub const EOWNERDEAD: ::c_int = 130; -pub const ENOTRECOVERABLE: ::c_int = 131; - -pub const SOCK_STREAM: ::c_int = 1; -pub const SOCK_DGRAM: ::c_int = 2; -pub const SOCK_SEQPACKET: ::c_int = 5; - -pub const SOL_SOCKET: ::c_int = 1; - -pub const SO_REUSEADDR: ::c_int = 2; -pub const SO_TYPE: ::c_int = 3; -pub const SO_ERROR: ::c_int = 4; -pub const SO_DONTROUTE: ::c_int = 5; -pub const SO_BROADCAST: ::c_int = 6; -pub const 
SO_SNDBUF: ::c_int = 7; -pub const SO_RCVBUF: ::c_int = 8; -pub const SO_KEEPALIVE: ::c_int = 9; -pub const SO_OOBINLINE: ::c_int = 10; -pub const SO_LINGER: ::c_int = 13; -pub const SO_REUSEPORT: ::c_int = 15; -pub const SO_RCVLOWAT: ::c_int = 18; -pub const SO_SNDLOWAT: ::c_int = 19; -pub const SO_RCVTIMEO: ::c_int = 20; -pub const SO_SNDTIMEO: ::c_int = 21; -pub const SO_ACCEPTCONN: ::c_int = 30; - -pub const O_ACCMODE: ::c_int = 3; -pub const O_APPEND: ::c_int = 1024; -pub const O_CREAT: ::c_int = 64; -pub const O_EXCL: ::c_int = 128; -pub const O_NOCTTY: ::c_int = 256; -pub const O_NONBLOCK: ::c_int = 2048; -pub const O_SYNC: ::c_int = 0x101000; -pub const O_DIRECT: ::c_int = 0x10000; -pub const O_DIRECTORY: ::c_int = 0x4000; -pub const O_NOFOLLOW: ::c_int = 0x8000; -pub const O_ASYNC: ::c_int = 0x2000; -pub const O_NDELAY: ::c_int = 0x800; - -pub const NI_MAXHOST: ::size_t = 1025; - -pub const NCCS: usize = 19; -pub const TCSBRKP: ::c_int = 0x5425; -pub const TCSANOW: ::c_int = 0; -pub const TCSADRAIN: ::c_int = 0x1; -pub const TCSAFLUSH: ::c_int = 0x2; -pub const IUTF8: ::tcflag_t = 0x00004000; -pub const VEOF: usize = 4; -pub const VEOL: usize = 11; -pub const VEOL2: usize = 16; -pub const VMIN: usize = 6; -pub const IEXTEN: ::tcflag_t = 0x00008000; -pub const TOSTOP: ::tcflag_t = 0x00000100; -pub const FLUSHO: ::tcflag_t = 0x00001000; - -pub const ADFS_SUPER_MAGIC: ::c_long = 0x0000adf5; -pub const AFFS_SUPER_MAGIC: ::c_long = 0x0000adff; -pub const CODA_SUPER_MAGIC: ::c_long = 0x73757245; -pub const CRAMFS_MAGIC: ::c_long = 0x28cd3d45; -pub const EFS_SUPER_MAGIC: ::c_long = 0x00414a53; -pub const EXT2_SUPER_MAGIC: ::c_long = 0x0000ef53; -pub const EXT3_SUPER_MAGIC: ::c_long = 0x0000ef53; -pub const EXT4_SUPER_MAGIC: ::c_long = 0x0000ef53; -pub const HPFS_SUPER_MAGIC: ::c_long = 0xf995e849; -pub const HUGETLBFS_MAGIC: ::c_long = 0x958458f6; -pub const ISOFS_SUPER_MAGIC: ::c_long = 0x00009660; -pub const JFFS2_SUPER_MAGIC: ::c_long = 0x000072b6; -pub const 
MINIX_SUPER_MAGIC: ::c_long = 0x0000137f; -pub const MINIX_SUPER_MAGIC2: ::c_long = 0x0000138f; -pub const MINIX2_SUPER_MAGIC: ::c_long = 0x00002468; -pub const MINIX2_SUPER_MAGIC2: ::c_long = 0x00002478; -pub const MSDOS_SUPER_MAGIC: ::c_long = 0x00004d44; -pub const NCP_SUPER_MAGIC: ::c_long = 0x0000564c; -pub const NFS_SUPER_MAGIC: ::c_long = 0x00006969; -pub const OPENPROM_SUPER_MAGIC: ::c_long = 0x00009fa1; -pub const PROC_SUPER_MAGIC: ::c_long = 0x00009fa0; -pub const QNX4_SUPER_MAGIC: ::c_long = 0x0000002f; -pub const REISERFS_SUPER_MAGIC: ::c_long = 0x52654973; -pub const SMB_SUPER_MAGIC: ::c_long = 0x0000517b; -pub const TMPFS_MAGIC: ::c_long = 0x01021994; -pub const USBDEVICE_SUPER_MAGIC: ::c_long = 0x00009fa2; - -pub const MADV_HUGEPAGE: ::c_int = 14; -pub const MADV_NOHUGEPAGE: ::c_int = 15; -pub const MAP_HUGETLB: ::c_int = 0x040000; - -pub const PTRACE_TRACEME: ::c_int = 0; -pub const PTRACE_PEEKTEXT: ::c_int = 1; -pub const PTRACE_PEEKDATA: ::c_int = 2; -pub const PTRACE_PEEKUSER: ::c_int = 3; -pub const PTRACE_POKETEXT: ::c_int = 4; -pub const PTRACE_POKEDATA: ::c_int = 5; -pub const PTRACE_POKEUSER: ::c_int = 6; -pub const PTRACE_CONT: ::c_int = 7; -pub const PTRACE_KILL: ::c_int = 8; -pub const PTRACE_SINGLESTEP: ::c_int = 9; -pub const PTRACE_ATTACH: ::c_int = 16; -pub const PTRACE_DETACH: ::c_int = 17; -pub const PTRACE_SYSCALL: ::c_int = 24; -pub const PTRACE_SETOPTIONS: ::c_int = 0x4200; -pub const PTRACE_GETEVENTMSG: ::c_int = 0x4201; -pub const PTRACE_GETSIGINFO: ::c_int = 0x4202; -pub const PTRACE_SETSIGINFO: ::c_int = 0x4203; -pub const PTRACE_GETFPREGS: ::c_int = 14; -pub const PTRACE_SETFPREGS: ::c_int = 15; -pub const PTRACE_GETREGS: ::c_int = 12; -pub const PTRACE_SETREGS: ::c_int = 13; - -pub const EFD_NONBLOCK: ::c_int = 0x800; - -pub const F_GETLK: ::c_int = 5; -pub const F_GETOWN: ::c_int = 9; -pub const F_SETOWN: ::c_int = 8; -pub const F_SETLK: ::c_int = 6; -pub const F_SETLKW: ::c_int = 7; - -pub const TCGETS: ::c_int = 0x5401; 
-pub const TCSETS: ::c_int = 0x5402; -pub const TCSETSW: ::c_int = 0x5403; -pub const TCSETSF: ::c_int = 0x5404; -pub const TCGETA: ::c_int = 0x5405; -pub const TCSETA: ::c_int = 0x5406; -pub const TCSETAW: ::c_int = 0x5407; -pub const TCSETAF: ::c_int = 0x5408; -pub const TCSBRK: ::c_int = 0x5409; -pub const TCXONC: ::c_int = 0x540A; -pub const TCFLSH: ::c_int = 0x540B; -pub const TIOCGSOFTCAR: ::c_int = 0x5419; -pub const TIOCSSOFTCAR: ::c_int = 0x541A; -pub const TIOCINQ: ::c_int = 0x541B; -pub const TIOCLINUX: ::c_int = 0x541C; -pub const TIOCGSERIAL: ::c_int = 0x541E; -pub const TIOCEXCL: ::c_int = 0x540C; -pub const TIOCNXCL: ::c_int = 0x540D; -pub const TIOCSCTTY: ::c_int = 0x540E; -pub const TIOCGPGRP: ::c_int = 0x540F; -pub const TIOCSPGRP: ::c_int = 0x5410; -pub const TIOCOUTQ: ::c_int = 0x5411; -pub const TIOCSTI: ::c_int = 0x5412; -pub const TIOCGWINSZ: ::c_int = 0x5413; -pub const TIOCSWINSZ: ::c_int = 0x5414; -pub const TIOCMGET: ::c_int = 0x5415; -pub const TIOCMBIS: ::c_int = 0x5416; -pub const TIOCMBIC: ::c_int = 0x5417; -pub const TIOCMSET: ::c_int = 0x5418; -pub const FIONREAD: ::c_int = 0x541B; -pub const TIOCCONS: ::c_int = 0x541D; - -pub const RTLD_GLOBAL: ::c_int = 0x2; -pub const RTLD_NOLOAD: ::c_int = 0x4; -pub const RTLD_NOW: ::c_int = 0; -pub const RTLD_DEFAULT: *mut ::c_void = -1isize as *mut ::c_void; - -pub const SEM_FAILED: *mut sem_t = 0 as *mut sem_t; - -pub const LINUX_REBOOT_MAGIC1: ::c_int = 0xfee1dead; -pub const LINUX_REBOOT_MAGIC2: ::c_int = 672274793; -pub const LINUX_REBOOT_MAGIC2A: ::c_int = 85072278; -pub const LINUX_REBOOT_MAGIC2B: ::c_int = 369367448; -pub const LINUX_REBOOT_MAGIC2C: ::c_int = 537993216; - -pub const LINUX_REBOOT_CMD_RESTART: ::c_int = 0x01234567; -pub const LINUX_REBOOT_CMD_HALT: ::c_int = 0xCDEF0123; -pub const LINUX_REBOOT_CMD_CAD_ON: ::c_int = 0x89ABCDEF; -pub const LINUX_REBOOT_CMD_CAD_OFF: ::c_int = 0x00000000; -pub const LINUX_REBOOT_CMD_POWER_OFF: ::c_int = 0x4321FEDC; -pub const 
LINUX_REBOOT_CMD_RESTART2: ::c_int = 0xA1B2C3D4; -pub const LINUX_REBOOT_CMD_SW_SUSPEND: ::c_int = 0xD000FCE2; -pub const LINUX_REBOOT_CMD_KEXEC: ::c_int = 0x45584543; - -pub const MCL_CURRENT: ::c_int = 0x0001; -pub const MCL_FUTURE: ::c_int = 0x0002; - -pub const SIGSTKSZ: ::size_t = 8192; -pub const CBAUD: ::tcflag_t = 0o0010017; -pub const TAB1: ::c_int = 0x00000800; -pub const TAB2: ::c_int = 0x00001000; -pub const TAB3: ::c_int = 0x00001800; -pub const CR1: ::c_int = 0x00000200; -pub const CR2: ::c_int = 0x00000400; -pub const CR3: ::c_int = 0x00000600; -pub const FF1: ::c_int = 0x00008000; -pub const BS1: ::c_int = 0x00002000; -pub const VT1: ::c_int = 0x00004000; -pub const VWERASE: usize = 14; -pub const VREPRINT: usize = 12; -pub const VSUSP: usize = 10; -pub const VSTART: usize = 8; -pub const VSTOP: usize = 9; -pub const VDISCARD: usize = 13; -pub const VTIME: usize = 5; -pub const IXON: ::tcflag_t = 0x00000400; -pub const IXOFF: ::tcflag_t = 0x00001000; -pub const ONLCR: ::tcflag_t = 0x4; -pub const CSIZE: ::tcflag_t = 0x00000030; -pub const CS6: ::tcflag_t = 0x00000010; -pub const CS7: ::tcflag_t = 0x00000020; -pub const CS8: ::tcflag_t = 0x00000030; -pub const CSTOPB: ::tcflag_t = 0x00000040; -pub const CREAD: ::tcflag_t = 0x00000080; -pub const PARENB: ::tcflag_t = 0x00000100; -pub const PARODD: ::tcflag_t = 0x00000200; -pub const HUPCL: ::tcflag_t = 0x00000400; -pub const CLOCAL: ::tcflag_t = 0x00000800; -pub const ECHOKE: ::tcflag_t = 0x00000800; -pub const ECHOE: ::tcflag_t = 0x00000010; -pub const ECHOK: ::tcflag_t = 0x00000020; -pub const ECHONL: ::tcflag_t = 0x00000040; -pub const ECHOPRT: ::tcflag_t = 0x00000400; -pub const ECHOCTL: ::tcflag_t = 0x00000200; -pub const ISIG: ::tcflag_t = 0x00000001; -pub const ICANON: ::tcflag_t = 0x00000002; -pub const PENDIN: ::tcflag_t = 0x00004000; -pub const NOFLSH: ::tcflag_t = 0x00000080; - -pub const EAI_SYSTEM: ::c_int = 11; - -pub const NETLINK_ROUTE: ::c_int = 0; -pub const NETLINK_UNUSED: ::c_int = 
1; -pub const NETLINK_USERSOCK: ::c_int = 2; -pub const NETLINK_FIREWALL: ::c_int = 3; -pub const NETLINK_SOCK_DIAG: ::c_int = 4; -pub const NETLINK_NFLOG: ::c_int = 5; -pub const NETLINK_XFRM: ::c_int = 6; -pub const NETLINK_SELINUX: ::c_int = 7; -pub const NETLINK_ISCSI: ::c_int = 8; -pub const NETLINK_AUDIT: ::c_int = 9; -pub const NETLINK_FIB_LOOKUP: ::c_int = 10; -pub const NETLINK_CONNECTOR: ::c_int = 11; -pub const NETLINK_NETFILTER: ::c_int = 12; -pub const NETLINK_IP6_FW: ::c_int = 13; -pub const NETLINK_DNRTMSG: ::c_int = 14; -pub const NETLINK_KOBJECT_UEVENT: ::c_int = 15; -pub const NETLINK_GENERIC: ::c_int = 16; -pub const NETLINK_SCSITRANSPORT: ::c_int = 18; -pub const NETLINK_ECRYPTFS: ::c_int = 19; -pub const NETLINK_RDMA: ::c_int = 20; -pub const NETLINK_CRYPTO: ::c_int = 21; -pub const NETLINK_INET_DIAG: ::c_int = NETLINK_SOCK_DIAG; - -pub const MAX_LINKS: ::c_int = 32; - -pub const NLM_F_REQUEST: ::c_int = 1; -pub const NLM_F_MULTI: ::c_int = 2; -pub const NLM_F_ACK: ::c_int = 4; -pub const NLM_F_ECHO: ::c_int = 8; -pub const NLM_F_DUMP_INTR: ::c_int = 16; - -pub const NLM_F_ROOT: ::c_int = 0x100; -pub const NLM_F_MATCH: ::c_int = 0x200; -pub const NLM_F_ATOMIC: ::c_int = 0x400; -pub const NLM_F_DUMP: ::c_int = NLM_F_ROOT | NLM_F_MATCH; - -pub const NLM_F_REPLACE: ::c_int = 0x100; -pub const NLM_F_EXCL: ::c_int = 0x200; -pub const NLM_F_CREATE: ::c_int = 0x400; -pub const NLM_F_APPEND: ::c_int = 0x800; - -pub const NLMSG_NOOP: ::c_int = 0x1; -pub const NLMSG_ERROR: ::c_int = 0x2; -pub const NLMSG_DONE: ::c_int = 0x3; -pub const NLMSG_OVERRUN: ::c_int = 0x4; -pub const NLMSG_MIN_TYPE: ::c_int = 0x10; - -pub const NETLINK_ADD_MEMBERSHIP: ::c_int = 1; -pub const NETLINK_DROP_MEMBERSHIP: ::c_int = 2; -pub const NETLINK_PKTINFO: ::c_int = 3; -pub const NETLINK_BROADCAST_ERROR: ::c_int = 4; -pub const NETLINK_NO_ENOBUFS: ::c_int = 5; -pub const NETLINK_RX_RING: ::c_int = 6; -pub const NETLINK_TX_RING: ::c_int = 7; - -pub const NLA_F_NESTED: ::c_int = 1 
<< 15; -pub const NLA_F_NET_BYTEORDER: ::c_int = 1 << 14; -pub const NLA_TYPE_MASK: ::c_int = !(NLA_F_NESTED | NLA_F_NET_BYTEORDER); - -pub const SIGEV_THREAD_ID: ::c_int = 4; - -f! { - pub fn sigemptyset(set: *mut sigset_t) -> ::c_int { - *set = 0; - return 0 - } - pub fn sigaddset(set: *mut sigset_t, signum: ::c_int) -> ::c_int { - *set |= signum as sigset_t; - return 0 - } - pub fn sigfillset(set: *mut sigset_t) -> ::c_int { - *set = !0; - return 0 - } - pub fn sigdelset(set: *mut sigset_t, signum: ::c_int) -> ::c_int { - *set &= !(signum as sigset_t); - return 0 - } - pub fn sigismember(set: *const sigset_t, signum: ::c_int) -> ::c_int { - (*set & (signum as sigset_t)) as ::c_int - } - pub fn cfgetispeed(termios: *const ::termios) -> ::speed_t { - (*termios).c_cflag & ::CBAUD - } - pub fn cfgetospeed(termios: *const ::termios) -> ::speed_t { - (*termios).c_cflag & ::CBAUD - } - pub fn cfsetispeed(termios: *mut ::termios, speed: ::speed_t) -> ::c_int { - let cbaud = ::CBAUD; - (*termios).c_cflag = ((*termios).c_cflag & !cbaud) | (speed & cbaud); - return 0 - } - pub fn cfsetospeed(termios: *mut ::termios, speed: ::speed_t) -> ::c_int { - let cbaud = ::CBAUD; - (*termios).c_cflag = ((*termios).c_cflag & !cbaud) | (speed & cbaud); - return 0 - } - pub fn tcgetattr(fd: ::c_int, termios: *mut ::termios) -> ::c_int { - ioctl(fd, ::TCGETS, termios) - } - pub fn tcsetattr(fd: ::c_int, - optional_actions: ::c_int, - termios: *const ::termios) -> ::c_int { - ioctl(fd, optional_actions, termios) - } - pub fn tcflow(fd: ::c_int, action: ::c_int) -> ::c_int { - ioctl(fd, ::TCXONC, action as *mut ::c_void) - } - pub fn tcflush(fd: ::c_int, action: ::c_int) -> ::c_int { - ioctl(fd, ::TCFLSH, action as *mut ::c_void) - } - pub fn tcsendbreak(fd: ::c_int, duration: ::c_int) -> ::c_int { - ioctl(fd, TCSBRKP, duration as *mut ::c_void) - } - - pub fn CPU_ZERO(cpuset: &mut cpu_set_t) -> () { - for slot in cpuset.__bits.iter_mut() { - *slot = 0; - } - } - - pub fn CPU_SET(cpu: 
usize, cpuset: &mut cpu_set_t) -> () { - let size_in___bits = 8 * mem::size_of_val(&cpuset.__bits[0]); - let (idx, offset) = (cpu / size_in___bits, cpu % size_in___bits); - cpuset.__bits[idx] |= 1 << offset; - () - } - - pub fn CPU_CLR(cpu: usize, cpuset: &mut cpu_set_t) -> () { - let size_in___bits = 8 * mem::size_of_val(&cpuset.__bits[0]); - let (idx, offset) = (cpu / size_in___bits, cpu % size_in___bits); - cpuset.__bits[idx] &= !(1 << offset); - () - } - - pub fn CPU_ISSET(cpu: usize, cpuset: &cpu_set_t) -> bool { - let size_in___bits = 8 * mem::size_of_val(&cpuset.__bits[0]); - let (idx, offset) = (cpu / size_in___bits, cpu % size_in___bits); - 0 != (cpuset.__bits[idx] & (1 << offset)) - } - - pub fn CPU_EQUAL(set1: &cpu_set_t, set2: &cpu_set_t) -> bool { - set1.__bits == set2.__bits - } -} - -extern { - static mut __progname: *mut ::c_char; -} - -extern { - pub fn madvise(addr: *const ::c_void, len: ::size_t, advice: ::c_int) - -> ::c_int; - pub fn ioctl(fd: ::c_int, request: ::c_int, ...) -> ::c_int; - pub fn readlink(path: *const ::c_char, - buf: *mut ::c_char, - bufsz: ::size_t) - -> ::c_int; - pub fn msync(addr: *const ::c_void, len: ::size_t, - flags: ::c_int) -> ::c_int; - pub fn mprotect(addr: *const ::c_void, len: ::size_t, prot: ::c_int) - -> ::c_int; - pub fn sysconf(name: ::c_int) -> ::c_long; - pub fn recvfrom(socket: ::c_int, buf: *mut ::c_void, len: ::size_t, - flags: ::c_int, addr: *const ::sockaddr, - addrlen: *mut ::socklen_t) -> ::ssize_t; - pub fn getnameinfo(sa: *const ::sockaddr, - salen: ::socklen_t, - host: *mut ::c_char, - hostlen: ::size_t, - serv: *mut ::c_char, - sevlen: ::size_t, - flags: ::c_int) -> ::c_int; - pub fn ptrace(request: ::c_int, ...) 
-> ::c_long; - pub fn getpriority(which: ::c_int, who: ::c_int) -> ::c_int; - pub fn setpriority(which: ::c_int, who: ::c_int, prio: ::c_int) -> ::c_int; - pub fn __sched_cpualloc(count: ::size_t) -> *mut ::cpu_set_t; - pub fn __sched_cpufree(set: *mut ::cpu_set_t); - pub fn __sched_cpucount(setsize: ::size_t, set: *mut cpu_set_t) -> ::c_int; - pub fn sched_getcpu() -> ::c_int; - - pub fn utmpname(name: *const ::c_char) -> ::c_int; - pub fn setutent(); - pub fn getutent() -> *mut utmp; -} - -cfg_if! { - if #[cfg(target_pointer_width = "32")] { - mod b32; - pub use self::b32::*; - } else if #[cfg(target_pointer_width = "64")] { - mod b64; - pub use self::b64::*; - } else { - // Unknown target_pointer_width - } -} diff -Nru cargo-0.17.0/vendor/libc-0.2.18/src/unix/notbsd/linux/mips/mips32.rs cargo-0.19.0/vendor/libc-0.2.18/src/unix/notbsd/linux/mips/mips32.rs --- cargo-0.17.0/vendor/libc-0.2.18/src/unix/notbsd/linux/mips/mips32.rs 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/libc-0.2.18/src/unix/notbsd/linux/mips/mips32.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,281 +0,0 @@ -pub type c_char = i8; -pub type c_long = i32; -pub type c_ulong = u32; -pub type clock_t = i32; -pub type time_t = i32; -pub type suseconds_t = i32; -pub type wchar_t = i32; -pub type off_t = i32; -pub type ino_t = u32; -pub type blkcnt_t = i32; -pub type blksize_t = i32; -pub type nlink_t = u32; -pub type fsblkcnt_t = ::c_ulong; -pub type fsfilcnt_t = ::c_ulong; -pub type rlim_t = c_ulong; - -s! 
{ - pub struct aiocb { - pub aio_fildes: ::c_int, - pub aio_lio_opcode: ::c_int, - pub aio_reqprio: ::c_int, - pub aio_buf: *mut ::c_void, - pub aio_nbytes: ::size_t, - pub aio_sigevent: ::sigevent, - __next_prio: *mut aiocb, - __abs_prio: ::c_int, - __policy: ::c_int, - __error_code: ::c_int, - __return_value: ::ssize_t, - pub aio_offset: off_t, - __unused1: [::c_char; 4], - __glibc_reserved: [::c_char; 32] - } - - pub struct stat { - pub st_dev: ::c_ulong, - st_pad1: [::c_long; 3], - pub st_ino: ::ino_t, - pub st_mode: ::mode_t, - pub st_nlink: ::nlink_t, - pub st_uid: ::uid_t, - pub st_gid: ::gid_t, - pub st_rdev: ::c_ulong, - pub st_pad2: [::c_long; 2], - pub st_size: ::off_t, - st_pad3: ::c_long, - pub st_atime: ::time_t, - pub st_atime_nsec: ::c_long, - pub st_mtime: ::time_t, - pub st_mtime_nsec: ::c_long, - pub st_ctime: ::time_t, - pub st_ctime_nsec: ::c_long, - pub st_blksize: ::blksize_t, - pub st_blocks: ::blkcnt_t, - st_pad5: [::c_long; 14], - } - - pub struct stat64 { - pub st_dev: ::c_ulong, - st_pad1: [::c_long; 3], - pub st_ino: ::ino64_t, - pub st_mode: ::mode_t, - pub st_nlink: ::nlink_t, - pub st_uid: ::uid_t, - pub st_gid: ::gid_t, - pub st_rdev: ::c_ulong, - st_pad2: [::c_long; 2], - pub st_size: ::off64_t, - pub st_atime: ::time_t, - pub st_atime_nsec: ::c_long, - pub st_mtime: ::time_t, - pub st_mtime_nsec: ::c_long, - pub st_ctime: ::time_t, - pub st_ctime_nsec: ::c_long, - pub st_blksize: ::blksize_t, - st_pad3: ::c_long, - pub st_blocks: ::blkcnt64_t, - st_pad5: [::c_long; 14], - } - - pub struct pthread_attr_t { - __size: [u32; 9] - } - - pub struct sigaction { - pub sa_flags: ::c_int, - pub sa_sigaction: ::sighandler_t, - pub sa_mask: sigset_t, - _restorer: *mut ::c_void, - _resv: [::c_int; 1], - } - - pub struct stack_t { - pub ss_sp: *mut ::c_void, - pub ss_size: ::size_t, - pub ss_flags: ::c_int, - } - - pub struct sigset_t { - __val: [::c_ulong; 32], - } - - pub struct siginfo_t { - pub si_signo: ::c_int, - pub si_code: ::c_int, - 
pub si_errno: ::c_int, - pub _pad: [::c_int; 29], - } - - pub struct glob64_t { - pub gl_pathc: ::size_t, - pub gl_pathv: *mut *mut ::c_char, - pub gl_offs: ::size_t, - pub gl_flags: ::c_int, - - __unused1: *mut ::c_void, - __unused2: *mut ::c_void, - __unused3: *mut ::c_void, - __unused4: *mut ::c_void, - __unused5: *mut ::c_void, - } - - pub struct ipc_perm { - pub __key: ::key_t, - pub uid: ::uid_t, - pub gid: ::gid_t, - pub cuid: ::uid_t, - pub cgid: ::gid_t, - pub mode: ::c_uint, - pub __seq: ::c_ushort, - __pad1: ::c_ushort, - __unused1: ::c_ulong, - __unused2: ::c_ulong - } - - pub struct shmid_ds { - pub shm_perm: ::ipc_perm, - pub shm_segsz: ::size_t, - pub shm_atime: ::time_t, - pub shm_dtime: ::time_t, - pub shm_ctime: ::time_t, - pub shm_cpid: ::pid_t, - pub shm_lpid: ::pid_t, - pub shm_nattch: ::shmatt_t, - __unused4: ::c_ulong, - __unused5: ::c_ulong - } - - pub struct msqid_ds { - pub msg_perm: ::ipc_perm, - #[cfg(target_endian = "big")] - __glibc_reserved1: ::c_ulong, - pub msg_stime: ::time_t, - #[cfg(target_endian = "little")] - __glibc_reserved1: ::c_ulong, - #[cfg(target_endian = "big")] - __glibc_reserved2: ::c_ulong, - pub msg_rtime: ::time_t, - #[cfg(target_endian = "little")] - __glibc_reserved2: ::c_ulong, - #[cfg(target_endian = "big")] - __glibc_reserved3: ::c_ulong, - pub msg_ctime: ::time_t, - #[cfg(target_endian = "little")] - __glibc_reserved3: ::c_ulong, - __msg_cbytes: ::c_ulong, - pub msg_qnum: ::msgqnum_t, - pub msg_qbytes: ::msglen_t, - pub msg_lspid: ::pid_t, - pub msg_lrpid: ::pid_t, - __glibc_reserved4: ::c_ulong, - __glibc_reserved5: ::c_ulong, - } - - pub struct statfs { - pub f_type: ::c_long, - pub f_bsize: ::c_long, - pub f_frsize: ::c_long, - pub f_blocks: ::fsblkcnt_t, - pub f_bfree: ::fsblkcnt_t, - pub f_files: ::fsblkcnt_t, - pub f_ffree: ::fsblkcnt_t, - pub f_bavail: ::fsblkcnt_t, - pub f_fsid: ::fsid_t, - - pub f_namelen: ::c_long, - f_spare: [::c_long; 6], - } - - pub struct msghdr { - pub msg_name: *mut ::c_void, 
- pub msg_namelen: ::socklen_t, - pub msg_iov: *mut ::iovec, - pub msg_iovlen: ::size_t, - pub msg_control: *mut ::c_void, - pub msg_controllen: ::size_t, - pub msg_flags: ::c_int, - } - - pub struct termios { - pub c_iflag: ::tcflag_t, - pub c_oflag: ::tcflag_t, - pub c_cflag: ::tcflag_t, - pub c_lflag: ::tcflag_t, - pub c_line: ::cc_t, - pub c_cc: [::cc_t; ::NCCS], - } - - pub struct flock { - pub l_type: ::c_short, - pub l_whence: ::c_short, - pub l_start: ::off_t, - pub l_len: ::off_t, - pub l_sysid: ::c_long, - pub l_pid: ::pid_t, - pad: [::c_long; 4], - } - - pub struct sysinfo { - pub uptime: ::c_long, - pub loads: [::c_ulong; 3], - pub totalram: ::c_ulong, - pub freeram: ::c_ulong, - pub sharedram: ::c_ulong, - pub bufferram: ::c_ulong, - pub totalswap: ::c_ulong, - pub freeswap: ::c_ulong, - pub procs: ::c_ushort, - pub pad: ::c_ushort, - pub totalhigh: ::c_ulong, - pub freehigh: ::c_ulong, - pub mem_unit: ::c_uint, - pub _f: [::c_char; 8], - } - - // FIXME this is actually a union - pub struct sem_t { - #[cfg(target_pointer_width = "32")] - __size: [::c_char; 16], - #[cfg(target_pointer_width = "64")] - __size: [::c_char; 32], - __align: [::c_long; 0], - } -} - -pub const __SIZEOF_PTHREAD_CONDATTR_T: usize = 4; -pub const __SIZEOF_PTHREAD_MUTEX_T: usize = 24; -pub const __SIZEOF_PTHREAD_RWLOCK_T: usize = 32; -pub const __SIZEOF_PTHREAD_MUTEXATTR_T: usize = 4; - -pub const RLIM_INFINITY: ::rlim_t = 0x7fffffff; - -pub const SYS_gettid: ::c_long = 4222; // Valid for O32 - -#[link(name = "util")] -extern { - pub fn sysctl(name: *mut ::c_int, - namelen: ::c_int, - oldp: *mut ::c_void, - oldlenp: *mut ::size_t, - newp: *mut ::c_void, - newlen: ::size_t) - -> ::c_int; - pub fn ioctl(fd: ::c_int, request: ::c_ulong, ...) 
-> ::c_int; - pub fn backtrace(buf: *mut *mut ::c_void, - sz: ::c_int) -> ::c_int; - pub fn glob64(pattern: *const ::c_char, - flags: ::c_int, - errfunc: ::dox::Option ::c_int>, - pglob: *mut glob64_t) -> ::c_int; - pub fn globfree64(pglob: *mut glob64_t); - pub fn ptrace(request: ::c_uint, ...) -> ::c_long; - pub fn pthread_attr_getaffinity_np(attr: *const ::pthread_attr_t, - cpusetsize: ::size_t, - cpuset: *mut ::cpu_set_t) -> ::c_int; - pub fn pthread_attr_setaffinity_np(attr: *mut ::pthread_attr_t, - cpusetsize: ::size_t, - cpuset: *const ::cpu_set_t) -> ::c_int; -} diff -Nru cargo-0.17.0/vendor/libc-0.2.18/src/unix/notbsd/linux/mips/mips64.rs cargo-0.19.0/vendor/libc-0.2.18/src/unix/notbsd/linux/mips/mips64.rs --- cargo-0.17.0/vendor/libc-0.2.18/src/unix/notbsd/linux/mips/mips64.rs 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/libc-0.2.18/src/unix/notbsd/linux/mips/mips64.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,219 +0,0 @@ -pub type blkcnt_t = i64; -pub type blksize_t = i64; -pub type c_char = i8; -pub type c_long = i64; -pub type c_ulong = u64; -pub type fsblkcnt_t = ::c_ulong; -pub type fsfilcnt_t = ::c_ulong; -pub type ino_t = u64; -pub type nlink_t = u64; -pub type off_t = i64; -pub type rlim_t = ::c_ulong; -pub type suseconds_t = i64; -pub type time_t = i64; -pub type wchar_t = i32; - -s! 
{ - pub struct aiocb { - pub aio_fildes: ::c_int, - pub aio_lio_opcode: ::c_int, - pub aio_reqprio: ::c_int, - pub aio_buf: *mut ::c_void, - pub aio_nbytes: ::size_t, - pub aio_sigevent: ::sigevent, - __next_prio: *mut aiocb, - __abs_prio: ::c_int, - __policy: ::c_int, - __error_code: ::c_int, - __return_value: ::ssize_t, - pub aio_offset: off_t, - __glibc_reserved: [::c_char; 32] - } - - pub struct stat { - pub st_dev: ::c_ulong, - st_pad1: [::c_long; 2], - pub st_ino: ::ino_t, - pub st_mode: ::mode_t, - pub st_nlink: ::nlink_t, - pub st_uid: ::uid_t, - pub st_gid: ::gid_t, - pub st_rdev: ::c_ulong, - st_pad2: [::c_ulong; 1], - pub st_size: ::off_t, - st_pad3: ::c_long, - pub st_atime: ::time_t, - pub st_atime_nsec: ::c_long, - pub st_mtime: ::time_t, - pub st_mtime_nsec: ::c_long, - pub st_ctime: ::time_t, - pub st_ctime_nsec: ::c_long, - pub st_blksize: ::blksize_t, - st_pad4: ::c_long, - pub st_blocks: ::blkcnt_t, - st_pad5: [::c_long; 7], - } - - pub struct stat64 { - pub st_dev: ::c_ulong, - st_pad1: [::c_long; 2], - pub st_ino: ::ino64_t, - pub st_mode: ::mode_t, - pub st_nlink: ::nlink_t, - pub st_uid: ::uid_t, - pub st_gid: ::gid_t, - pub st_rdev: ::c_ulong, - st_pad2: [::c_long; 2], - pub st_size: ::off64_t, - pub st_atime: ::time_t, - pub st_atime_nsec: ::c_long, - pub st_mtime: ::time_t, - pub st_mtime_nsec: ::c_long, - pub st_ctime: ::time_t, - pub st_ctime_nsec: ::c_long, - pub st_blksize: ::blksize_t, - st_pad3: ::c_long, - pub st_blocks: ::blkcnt64_t, - st_pad5: [::c_long; 7], - } - - pub struct pthread_attr_t { - __size: [::c_ulong; 7] - } - - pub struct sigaction { - pub sa_flags: ::c_int, - pub sa_sigaction: ::sighandler_t, - pub sa_mask: sigset_t, - _restorer: *mut ::c_void, - } - - pub struct stack_t { - pub ss_sp: *mut ::c_void, - pub ss_size: ::size_t, - pub ss_flags: ::c_int, - } - - pub struct sigset_t { - __size: [::c_ulong; 16], - } - - pub struct siginfo_t { - pub si_signo: ::c_int, - pub si_code: ::c_int, - pub si_errno: ::c_int, - 
_pad: ::c_int, - _pad2: [::c_long; 14], - } - - pub struct ipc_perm { - pub __key: ::key_t, - pub uid: ::uid_t, - pub gid: ::gid_t, - pub cuid: ::uid_t, - pub cgid: ::gid_t, - pub mode: ::c_uint, - pub __seq: ::c_ushort, - __pad1: ::c_ushort, - __unused1: ::c_ulong, - __unused2: ::c_ulong - } - - pub struct shmid_ds { - pub shm_perm: ::ipc_perm, - pub shm_segsz: ::size_t, - pub shm_atime: ::time_t, - pub shm_dtime: ::time_t, - pub shm_ctime: ::time_t, - pub shm_cpid: ::pid_t, - pub shm_lpid: ::pid_t, - pub shm_nattch: ::shmatt_t, - __unused4: ::c_ulong, - __unused5: ::c_ulong - } - - pub struct msqid_ds { - pub msg_perm: ::ipc_perm, - pub msg_stime: ::time_t, - pub msg_rtime: ::time_t, - pub msg_ctime: ::time_t, - __msg_cbytes: ::c_ulong, - pub msg_qnum: ::msgqnum_t, - pub msg_qbytes: ::msglen_t, - pub msg_lspid: ::pid_t, - pub msg_lrpid: ::pid_t, - __glibc_reserved4: ::c_ulong, - __glibc_reserved5: ::c_ulong, - } - - pub struct statfs { - pub f_type: ::c_long, - pub f_bsize: ::c_long, - pub f_frsize: ::c_long, - pub f_blocks: ::fsblkcnt_t, - pub f_bfree: ::fsblkcnt_t, - pub f_files: ::fsblkcnt_t, - pub f_ffree: ::fsblkcnt_t, - pub f_bavail: ::fsblkcnt_t, - pub f_fsid: ::fsid_t, - - pub f_namelen: ::c_long, - f_spare: [::c_long; 6], - } - - pub struct msghdr { - pub msg_name: *mut ::c_void, - pub msg_namelen: ::socklen_t, - pub msg_iov: *mut ::iovec, - pub msg_iovlen: ::size_t, - pub msg_control: *mut ::c_void, - pub msg_controllen: ::size_t, - pub msg_flags: ::c_int, - } - - pub struct termios { - pub c_iflag: ::tcflag_t, - pub c_oflag: ::tcflag_t, - pub c_cflag: ::tcflag_t, - pub c_lflag: ::tcflag_t, - pub c_line: ::cc_t, - pub c_cc: [::cc_t; ::NCCS], - } - - pub struct sysinfo { - pub uptime: ::c_long, - pub loads: [::c_ulong; 3], - pub totalram: ::c_ulong, - pub freeram: ::c_ulong, - pub sharedram: ::c_ulong, - pub bufferram: ::c_ulong, - pub totalswap: ::c_ulong, - pub freeswap: ::c_ulong, - pub procs: ::c_ushort, - pub pad: ::c_ushort, - pub totalhigh: 
::c_ulong, - pub freehigh: ::c_ulong, - pub mem_unit: ::c_uint, - pub _f: [::c_char; 0], - } - - // FIXME this is actually a union - pub struct sem_t { - __size: [::c_char; 32], - __align: [::c_long; 0], - } -} - -pub const __SIZEOF_PTHREAD_CONDATTR_T: usize = 4; -pub const __SIZEOF_PTHREAD_MUTEXATTR_T: usize = 4; -pub const __SIZEOF_PTHREAD_MUTEX_T: usize = 40; -pub const __SIZEOF_PTHREAD_RWLOCK_T: usize = 56; - -pub const RLIM_INFINITY: ::rlim_t = 0xffff_ffff_ffff_ffff; - -pub const SYS_gettid: ::c_long = 5178; // Valid for n64 - -#[link(name = "util")] -extern { - pub fn ioctl(fd: ::c_int, request: ::c_ulong, ...) -> ::c_int; -} diff -Nru cargo-0.17.0/vendor/libc-0.2.18/src/unix/notbsd/linux/mips/mod.rs cargo-0.19.0/vendor/libc-0.2.18/src/unix/notbsd/linux/mips/mod.rs --- cargo-0.17.0/vendor/libc-0.2.18/src/unix/notbsd/linux/mips/mod.rs 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/libc-0.2.18/src/unix/notbsd/linux/mips/mod.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,400 +0,0 @@ -pub const BUFSIZ: ::c_uint = 8192; -pub const TMP_MAX: ::c_uint = 238328; -pub const FOPEN_MAX: ::c_uint = 16; -pub const POSIX_FADV_DONTNEED: ::c_int = 4; -pub const POSIX_FADV_NOREUSE: ::c_int = 5; -pub const POSIX_MADV_DONTNEED: ::c_int = 4; -pub const _SC_2_C_VERSION: ::c_int = 96; -pub const O_ACCMODE: ::c_int = 3; -pub const O_DIRECT: ::c_int = 0x8000; -pub const O_DIRECTORY: ::c_int = 0x10000; -pub const O_NOFOLLOW: ::c_int = 0x20000; -pub const ST_RELATIME: ::c_ulong = 4096; -pub const NI_MAXHOST: ::socklen_t = 1025; - -pub const RLIMIT_NOFILE: ::c_int = 5; -pub const RLIMIT_AS: ::c_int = 6; -pub const RLIMIT_RSS: ::c_int = 7; -pub const RLIMIT_NPROC: ::c_int = 8; -pub const RLIMIT_MEMLOCK: ::c_int = 9; -pub const RLIMIT_NLIMITS: ::c_int = 16; - -pub const O_APPEND: ::c_int = 8; -pub const O_CREAT: ::c_int = 256; -pub const O_EXCL: ::c_int = 1024; -pub const O_NOCTTY: ::c_int = 2048; -pub const O_NONBLOCK: ::c_int = 128; -pub const O_SYNC: ::c_int = 0x4010; -pub 
const O_RSYNC: ::c_int = 0x4010; -pub const O_DSYNC: ::c_int = 0x10; -pub const O_FSYNC: ::c_int = 0x4010; -pub const O_ASYNC: ::c_int = 0x1000; -pub const O_NDELAY: ::c_int = 0x80; - -pub const SOCK_NONBLOCK: ::c_int = 128; - -pub const EDEADLK: ::c_int = 45; -pub const ENAMETOOLONG: ::c_int = 78; -pub const ENOLCK: ::c_int = 46; -pub const ENOSYS: ::c_int = 89; -pub const ENOTEMPTY: ::c_int = 93; -pub const ELOOP: ::c_int = 90; -pub const ENOMSG: ::c_int = 35; -pub const EIDRM: ::c_int = 36; -pub const ECHRNG: ::c_int = 37; -pub const EL2NSYNC: ::c_int = 38; -pub const EL3HLT: ::c_int = 39; -pub const EL3RST: ::c_int = 40; -pub const ELNRNG: ::c_int = 41; -pub const EUNATCH: ::c_int = 42; -pub const ENOCSI: ::c_int = 43; -pub const EL2HLT: ::c_int = 44; -pub const EBADE: ::c_int = 50; -pub const EBADR: ::c_int = 51; -pub const EXFULL: ::c_int = 52; -pub const ENOANO: ::c_int = 53; -pub const EBADRQC: ::c_int = 54; -pub const EBADSLT: ::c_int = 55; -pub const EDEADLOCK: ::c_int = 56; -pub const EMULTIHOP: ::c_int = 74; -pub const EOVERFLOW: ::c_int = 79; -pub const ENOTUNIQ: ::c_int = 80; -pub const EBADFD: ::c_int = 81; -pub const EBADMSG: ::c_int = 77; -pub const EREMCHG: ::c_int = 82; -pub const ELIBACC: ::c_int = 83; -pub const ELIBBAD: ::c_int = 84; -pub const ELIBSCN: ::c_int = 85; -pub const ELIBMAX: ::c_int = 86; -pub const ELIBEXEC: ::c_int = 87; -pub const EILSEQ: ::c_int = 88; -pub const ERESTART: ::c_int = 91; -pub const ESTRPIPE: ::c_int = 92; -pub const EUSERS: ::c_int = 94; -pub const ENOTSOCK: ::c_int = 95; -pub const EDESTADDRREQ: ::c_int = 96; -pub const EMSGSIZE: ::c_int = 97; -pub const EPROTOTYPE: ::c_int = 98; -pub const ENOPROTOOPT: ::c_int = 99; -pub const EPROTONOSUPPORT: ::c_int = 120; -pub const ESOCKTNOSUPPORT: ::c_int = 121; -pub const EOPNOTSUPP: ::c_int = 122; -pub const ENOTSUP: ::c_int = EOPNOTSUPP; -pub const EPFNOSUPPORT: ::c_int = 123; -pub const EAFNOSUPPORT: ::c_int = 124; -pub const EADDRINUSE: ::c_int = 125; -pub const 
EADDRNOTAVAIL: ::c_int = 126; -pub const ENETDOWN: ::c_int = 127; -pub const ENETUNREACH: ::c_int = 128; -pub const ENETRESET: ::c_int = 129; -pub const ECONNABORTED: ::c_int = 130; -pub const ECONNRESET: ::c_int = 131; -pub const ENOBUFS: ::c_int = 132; -pub const EISCONN: ::c_int = 133; -pub const ENOTCONN: ::c_int = 134; -pub const ESHUTDOWN: ::c_int = 143; -pub const ETOOMANYREFS: ::c_int = 144; -pub const ETIMEDOUT: ::c_int = 145; -pub const ECONNREFUSED: ::c_int = 146; -pub const EHOSTDOWN: ::c_int = 147; -pub const EHOSTUNREACH: ::c_int = 148; -pub const EALREADY: ::c_int = 149; -pub const EINPROGRESS: ::c_int = 150; -pub const ESTALE: ::c_int = 151; -pub const EUCLEAN: ::c_int = 135; -pub const ENOTNAM: ::c_int = 137; -pub const ENAVAIL: ::c_int = 138; -pub const EISNAM: ::c_int = 139; -pub const EREMOTEIO: ::c_int = 140; -pub const EDQUOT: ::c_int = 1133; -pub const ENOMEDIUM: ::c_int = 159; -pub const EMEDIUMTYPE: ::c_int = 160; -pub const ECANCELED: ::c_int = 158; -pub const ENOKEY: ::c_int = 161; -pub const EKEYEXPIRED: ::c_int = 162; -pub const EKEYREVOKED: ::c_int = 163; -pub const EKEYREJECTED: ::c_int = 164; -pub const EOWNERDEAD: ::c_int = 165; -pub const ENOTRECOVERABLE: ::c_int = 166; -pub const ERFKILL: ::c_int = 167; - -pub const LC_PAPER: ::c_int = 7; -pub const LC_NAME: ::c_int = 8; -pub const LC_ADDRESS: ::c_int = 9; -pub const LC_TELEPHONE: ::c_int = 10; -pub const LC_MEASUREMENT: ::c_int = 11; -pub const LC_IDENTIFICATION: ::c_int = 12; -pub const LC_PAPER_MASK: ::c_int = (1 << LC_PAPER); -pub const LC_NAME_MASK: ::c_int = (1 << LC_NAME); -pub const LC_ADDRESS_MASK: ::c_int = (1 << LC_ADDRESS); -pub const LC_TELEPHONE_MASK: ::c_int = (1 << LC_TELEPHONE); -pub const LC_MEASUREMENT_MASK: ::c_int = (1 << LC_MEASUREMENT); -pub const LC_IDENTIFICATION_MASK: ::c_int = (1 << LC_IDENTIFICATION); -pub const LC_ALL_MASK: ::c_int = ::LC_CTYPE_MASK - | ::LC_NUMERIC_MASK - | ::LC_TIME_MASK - | ::LC_COLLATE_MASK - | ::LC_MONETARY_MASK - | 
::LC_MESSAGES_MASK - | LC_PAPER_MASK - | LC_NAME_MASK - | LC_ADDRESS_MASK - | LC_TELEPHONE_MASK - | LC_MEASUREMENT_MASK - | LC_IDENTIFICATION_MASK; - -pub const MAP_NORESERVE: ::c_int = 0x400; -pub const MAP_ANON: ::c_int = 0x800; -pub const MAP_ANONYMOUS: ::c_int = 0x800; -pub const MAP_GROWSDOWN: ::c_int = 0x1000; -pub const MAP_DENYWRITE: ::c_int = 0x2000; -pub const MAP_EXECUTABLE: ::c_int = 0x4000; -pub const MAP_LOCKED: ::c_int = 0x8000; -pub const MAP_POPULATE: ::c_int = 0x10000; -pub const MAP_NONBLOCK: ::c_int = 0x20000; -pub const MAP_STACK: ::c_int = 0x40000; - -pub const SOCK_STREAM: ::c_int = 2; -pub const SOCK_DGRAM: ::c_int = 1; -pub const SOCK_SEQPACKET: ::c_int = 5; - -pub const SOL_SOCKET: ::c_int = 0xffff; - -pub const SO_REUSEADDR: ::c_int = 4; -pub const SO_REUSEPORT: ::c_int = 0x200; -pub const SO_TYPE: ::c_int = 4104; -pub const SO_ERROR: ::c_int = 4103; -pub const SO_DONTROUTE: ::c_int = 16; -pub const SO_BROADCAST: ::c_int = 32; -pub const SO_SNDBUF: ::c_int = 4097; -pub const SO_RCVBUF: ::c_int = 4098; -pub const SO_KEEPALIVE: ::c_int = 8; -pub const SO_OOBINLINE: ::c_int = 256; -pub const SO_LINGER: ::c_int = 128; -pub const SO_RCVLOWAT: ::c_int = 4100; -pub const SO_SNDLOWAT: ::c_int = 4099; -pub const SO_RCVTIMEO: ::c_int = 4102; -pub const SO_SNDTIMEO: ::c_int = 4101; -pub const SO_ACCEPTCONN: ::c_int = 4105; - -pub const FIOCLEX: ::c_ulong = 0x6601; -pub const FIONBIO: ::c_ulong = 0x667e; - -pub const SA_ONSTACK: ::c_int = 0x08000000; -pub const SA_SIGINFO: ::c_int = 0x00000008; -pub const SA_NOCLDWAIT: ::c_int = 0x00010000; - -pub const SIGCHLD: ::c_int = 18; -pub const SIGBUS: ::c_int = 10; -pub const SIGTTIN: ::c_int = 26; -pub const SIGTTOU: ::c_int = 27; -pub const SIGXCPU: ::c_int = 30; -pub const SIGXFSZ: ::c_int = 31; -pub const SIGVTALRM: ::c_int = 28; -pub const SIGPROF: ::c_int = 29; -pub const SIGWINCH: ::c_int = 20; -pub const SIGUSR1: ::c_int = 16; -pub const SIGUSR2: ::c_int = 17; -pub const SIGCONT: ::c_int = 25; -pub 
const SIGSTOP: ::c_int = 23; -pub const SIGTSTP: ::c_int = 24; -pub const SIGURG: ::c_int = 21; -pub const SIGIO: ::c_int = 22; -pub const SIGSYS: ::c_int = 12; -pub const SIGPOLL: ::c_int = 22; -pub const SIGPWR: ::c_int = 19; -pub const SIG_SETMASK: ::c_int = 3; -pub const SIG_BLOCK: ::c_int = 0x1; -pub const SIG_UNBLOCK: ::c_int = 0x2; - -pub const POLLRDNORM: ::c_short = 0x040; -pub const POLLWRNORM: ::c_short = 0x004; -pub const POLLRDBAND: ::c_short = 0x080; -pub const POLLWRBAND: ::c_short = 0x100; - -pub const PTHREAD_STACK_MIN: ::size_t = 131072; - -pub const ADFS_SUPER_MAGIC: ::c_long = 0x0000adf5; -pub const AFFS_SUPER_MAGIC: ::c_long = 0x0000adff; -pub const CODA_SUPER_MAGIC: ::c_long = 0x73757245; -pub const CRAMFS_MAGIC: ::c_long = 0x28cd3d45; -pub const EFS_SUPER_MAGIC: ::c_long = 0x00414a53; -pub const EXT2_SUPER_MAGIC: ::c_long = 0x0000ef53; -pub const EXT3_SUPER_MAGIC: ::c_long = 0x0000ef53; -pub const EXT4_SUPER_MAGIC: ::c_long = 0x0000ef53; -pub const HPFS_SUPER_MAGIC: ::c_long = 0xf995e849; -pub const HUGETLBFS_MAGIC: ::c_long = 0x958458f6; -pub const ISOFS_SUPER_MAGIC: ::c_long = 0x00009660; -pub const JFFS2_SUPER_MAGIC: ::c_long = 0x000072b6; -pub const MINIX_SUPER_MAGIC: ::c_long = 0x0000137f; -pub const MINIX_SUPER_MAGIC2: ::c_long = 0x0000138f; -pub const MINIX2_SUPER_MAGIC: ::c_long = 0x00002468; -pub const MINIX2_SUPER_MAGIC2: ::c_long = 0x00002478; -pub const MSDOS_SUPER_MAGIC: ::c_long = 0x00004d44; -pub const NCP_SUPER_MAGIC: ::c_long = 0x0000564c; -pub const NFS_SUPER_MAGIC: ::c_long = 0x00006969; -pub const OPENPROM_SUPER_MAGIC: ::c_long = 0x00009fa1; -pub const PROC_SUPER_MAGIC: ::c_long = 0x00009fa0; -pub const QNX4_SUPER_MAGIC: ::c_long = 0x0000002f; -pub const REISERFS_SUPER_MAGIC: ::c_long = 0x52654973; -pub const SMB_SUPER_MAGIC: ::c_long = 0x0000517b; -pub const TMPFS_MAGIC: ::c_long = 0x01021994; -pub const USBDEVICE_SUPER_MAGIC: ::c_long = 0x00009fa2; - -pub const VEOF: usize = 16; -pub const VEOL: usize = 17; -pub const 
VEOL2: usize = 6; -pub const VMIN: usize = 4; -pub const IEXTEN: ::tcflag_t = 0x00000100; -pub const TOSTOP: ::tcflag_t = 0x00008000; -pub const FLUSHO: ::tcflag_t = 0x00002000; -pub const IUTF8: ::tcflag_t = 0x00004000; -pub const TCSANOW: ::c_int = 0x540e; -pub const TCSADRAIN: ::c_int = 0x540f; -pub const TCSAFLUSH: ::c_int = 0x5410; - -pub const CPU_SETSIZE: ::c_int = 0x400; - -pub const PTRACE_TRACEME: ::c_uint = 0; -pub const PTRACE_PEEKTEXT: ::c_uint = 1; -pub const PTRACE_PEEKDATA: ::c_uint = 2; -pub const PTRACE_PEEKUSER: ::c_uint = 3; -pub const PTRACE_POKETEXT: ::c_uint = 4; -pub const PTRACE_POKEDATA: ::c_uint = 5; -pub const PTRACE_POKEUSER: ::c_uint = 6; -pub const PTRACE_CONT: ::c_uint = 7; -pub const PTRACE_KILL: ::c_uint = 8; -pub const PTRACE_SINGLESTEP: ::c_uint = 9; -pub const PTRACE_ATTACH: ::c_uint = 16; -pub const PTRACE_DETACH: ::c_uint = 17; -pub const PTRACE_SYSCALL: ::c_uint = 24; -pub const PTRACE_SETOPTIONS: ::c_uint = 0x4200; -pub const PTRACE_GETEVENTMSG: ::c_uint = 0x4201; -pub const PTRACE_GETSIGINFO: ::c_uint = 0x4202; -pub const PTRACE_SETSIGINFO: ::c_uint = 0x4203; -pub const PTRACE_GETFPREGS: ::c_uint = 14; -pub const PTRACE_SETFPREGS: ::c_uint = 15; -pub const PTRACE_GETFPXREGS: ::c_uint = 18; -pub const PTRACE_SETFPXREGS: ::c_uint = 19; -pub const PTRACE_GETREGS: ::c_uint = 12; -pub const PTRACE_SETREGS: ::c_uint = 13; - -pub const MAP_HUGETLB: ::c_int = 0x080000; - -pub const EFD_NONBLOCK: ::c_int = 0x80; - -pub const F_GETLK: ::c_int = 14; -pub const F_GETOWN: ::c_int = 23; -pub const F_SETOWN: ::c_int = 24; -pub const F_SETLK: ::c_int = 6; -pub const F_SETLKW: ::c_int = 7; - -pub const SFD_NONBLOCK: ::c_int = 0x80; - -pub const TCGETS: ::c_ulong = 0x540d; -pub const TCSETS: ::c_ulong = 0x540e; -pub const TCSETSW: ::c_ulong = 0x540f; -pub const TCSETSF: ::c_ulong = 0x5410; -pub const TCGETA: ::c_ulong = 0x5401; -pub const TCSETA: ::c_ulong = 0x5402; -pub const TCSETAW: ::c_ulong = 0x5403; -pub const TCSETAF: ::c_ulong = 
0x5404; -pub const TCSBRK: ::c_ulong = 0x5405; -pub const TCXONC: ::c_ulong = 0x5406; -pub const TCFLSH: ::c_ulong = 0x5407; -pub const TIOCGSOFTCAR: ::c_ulong = 0x5481; -pub const TIOCSSOFTCAR: ::c_ulong = 0x5482; -pub const TIOCINQ: ::c_ulong = 0x467f; -pub const TIOCLINUX: ::c_ulong = 0x5483; -pub const TIOCGSERIAL: ::c_ulong = 0x5484; -pub const TIOCEXCL: ::c_ulong = 0x740d; -pub const TIOCNXCL: ::c_ulong = 0x740e; -pub const TIOCSCTTY: ::c_ulong = 0x5480; -pub const TIOCGPGRP: ::c_ulong = 0x40047477; -pub const TIOCSPGRP: ::c_ulong = 0x80047476; -pub const TIOCOUTQ: ::c_ulong = 0x7472; -pub const TIOCSTI: ::c_ulong = 0x5472; -pub const TIOCGWINSZ: ::c_ulong = 0x40087468; -pub const TIOCSWINSZ: ::c_ulong = 0x80087467; -pub const TIOCMGET: ::c_ulong = 0x741d; -pub const TIOCMBIS: ::c_ulong = 0x741b; -pub const TIOCMBIC: ::c_ulong = 0x741c; -pub const TIOCMSET: ::c_ulong = 0x741a; -pub const FIONREAD: ::c_ulong = 0x467f; -pub const TIOCCONS: ::c_ulong = 0x80047478; - -pub const RTLD_DEEPBIND: ::c_int = 0x10; -pub const RTLD_GLOBAL: ::c_int = 0x4; -pub const RTLD_NOLOAD: ::c_int = 0x8; - -pub const LINUX_REBOOT_MAGIC1: ::c_int = 0xfee1dead; -pub const LINUX_REBOOT_MAGIC2: ::c_int = 672274793; -pub const LINUX_REBOOT_MAGIC2A: ::c_int = 85072278; -pub const LINUX_REBOOT_MAGIC2B: ::c_int = 369367448; -pub const LINUX_REBOOT_MAGIC2C: ::c_int = 537993216; - -pub const LINUX_REBOOT_CMD_RESTART: ::c_int = 0x01234567; -pub const LINUX_REBOOT_CMD_HALT: ::c_int = 0xCDEF0123; -pub const LINUX_REBOOT_CMD_CAD_ON: ::c_int = 0x89ABCDEF; -pub const LINUX_REBOOT_CMD_CAD_OFF: ::c_int = 0x00000000; -pub const LINUX_REBOOT_CMD_POWER_OFF: ::c_int = 0x4321FEDC; -pub const LINUX_REBOOT_CMD_RESTART2: ::c_int = 0xA1B2C3D4; -pub const LINUX_REBOOT_CMD_SW_SUSPEND: ::c_int = 0xD000FCE2; -pub const LINUX_REBOOT_CMD_KEXEC: ::c_int = 0x45584543; - -pub const MCL_CURRENT: ::c_int = 0x0001; -pub const MCL_FUTURE: ::c_int = 0x0002; - -pub const SIGSTKSZ: ::size_t = 8192; -pub const CBAUD: 
::tcflag_t = 0o0010017; -pub const TAB1: ::c_int = 0x00000800; -pub const TAB2: ::c_int = 0x00001000; -pub const TAB3: ::c_int = 0x00001800; -pub const CR1: ::c_int = 0x00000200; -pub const CR2: ::c_int = 0x00000400; -pub const CR3: ::c_int = 0x00000600; -pub const FF1: ::c_int = 0x00008000; -pub const BS1: ::c_int = 0x00002000; -pub const VT1: ::c_int = 0x00004000; -pub const VWERASE: usize = 14; -pub const VREPRINT: usize = 12; -pub const VSUSP: usize = 10; -pub const VSTART: usize = 8; -pub const VSTOP: usize = 9; -pub const VDISCARD: usize = 13; -pub const VTIME: usize = 5; -pub const IXON: ::tcflag_t = 0x00000400; -pub const IXOFF: ::tcflag_t = 0x00001000; -pub const ONLCR: ::tcflag_t = 0x4; -pub const CSIZE: ::tcflag_t = 0x00000030; -pub const CS6: ::tcflag_t = 0x00000010; -pub const CS7: ::tcflag_t = 0x00000020; -pub const CS8: ::tcflag_t = 0x00000030; -pub const CSTOPB: ::tcflag_t = 0x00000040; -pub const CREAD: ::tcflag_t = 0x00000080; -pub const PARENB: ::tcflag_t = 0x00000100; -pub const PARODD: ::tcflag_t = 0x00000200; -pub const HUPCL: ::tcflag_t = 0x00000400; -pub const CLOCAL: ::tcflag_t = 0x00000800; -pub const ECHOKE: ::tcflag_t = 0x00000800; -pub const ECHOE: ::tcflag_t = 0x00000010; -pub const ECHOK: ::tcflag_t = 0x00000020; -pub const ECHONL: ::tcflag_t = 0x00000040; -pub const ECHOPRT: ::tcflag_t = 0x00000400; -pub const ECHOCTL: ::tcflag_t = 0x00000200; -pub const ISIG: ::tcflag_t = 0x00000001; -pub const ICANON: ::tcflag_t = 0x00000002; -pub const PENDIN: ::tcflag_t = 0x00004000; -pub const NOFLSH: ::tcflag_t = 0x00000080; - -cfg_if! 
{ - if #[cfg(target_arch = "mips")] { - mod mips32; - pub use self::mips32::*; - } else if #[cfg(target_arch = "mips64")] { - mod mips64; - pub use self::mips64::*; - } else { - // Unknown target_arch - } -} diff -Nru cargo-0.17.0/vendor/libc-0.2.18/src/unix/notbsd/linux/mod.rs cargo-0.19.0/vendor/libc-0.2.18/src/unix/notbsd/linux/mod.rs --- cargo-0.17.0/vendor/libc-0.2.18/src/unix/notbsd/linux/mod.rs 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/libc-0.2.18/src/unix/notbsd/linux/mod.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,770 +0,0 @@ -//! Linux-specific definitions for linux-like values - -use dox::mem; - -pub type useconds_t = u32; -pub type dev_t = u64; -pub type socklen_t = u32; -pub type pthread_t = c_ulong; -pub type mode_t = u32; -pub type ino64_t = u64; -pub type off64_t = i64; -pub type blkcnt64_t = i64; -pub type rlim64_t = u64; -pub type shmatt_t = ::c_ulong; -pub type mqd_t = ::c_int; -pub type msgqnum_t = ::c_ulong; -pub type msglen_t = ::c_ulong; -pub type nfds_t = ::c_ulong; -pub type nl_item = ::c_int; - -pub enum fpos64_t {} // TODO: fill this out with a struct - -s! 
{ - pub struct dirent { - pub d_ino: ::ino_t, - pub d_off: ::off_t, - pub d_reclen: ::c_ushort, - pub d_type: ::c_uchar, - pub d_name: [::c_char; 256], - } - - pub struct dirent64 { - pub d_ino: ::ino64_t, - pub d_off: ::off64_t, - pub d_reclen: ::c_ushort, - pub d_type: ::c_uchar, - pub d_name: [::c_char; 256], - } - - pub struct rlimit64 { - pub rlim_cur: rlim64_t, - pub rlim_max: rlim64_t, - } - - pub struct glob_t { - pub gl_pathc: ::size_t, - pub gl_pathv: *mut *mut c_char, - pub gl_offs: ::size_t, - pub gl_flags: ::c_int, - - __unused1: *mut ::c_void, - __unused2: *mut ::c_void, - __unused3: *mut ::c_void, - __unused4: *mut ::c_void, - __unused5: *mut ::c_void, - } - - pub struct ifaddrs { - pub ifa_next: *mut ifaddrs, - pub ifa_name: *mut c_char, - pub ifa_flags: ::c_uint, - pub ifa_addr: *mut ::sockaddr, - pub ifa_netmask: *mut ::sockaddr, - pub ifa_ifu: *mut ::sockaddr, // FIXME This should be a union - pub ifa_data: *mut ::c_void - } - - pub struct pthread_mutex_t { - #[cfg(any(target_arch = "mips", target_arch = "arm", - target_arch = "powerpc"))] - __align: [::c_long; 0], - #[cfg(not(any(target_arch = "mips", target_arch = "arm", - target_arch = "powerpc")))] - __align: [::c_longlong; 0], - size: [u8; __SIZEOF_PTHREAD_MUTEX_T], - } - - pub struct pthread_rwlock_t { - #[cfg(any(target_arch = "mips", target_arch = "arm", - target_arch = "powerpc"))] - __align: [::c_long; 0], - #[cfg(not(any(target_arch = "mips", target_arch = "arm", - target_arch = "powerpc")))] - __align: [::c_longlong; 0], - size: [u8; __SIZEOF_PTHREAD_RWLOCK_T], - } - - pub struct pthread_mutexattr_t { - #[cfg(any(target_arch = "x86_64", target_arch = "powerpc64", - target_arch = "mips64", target_arch = "s390x"))] - __align: [::c_int; 0], - #[cfg(not(any(target_arch = "x86_64", target_arch = "powerpc64", - target_arch = "mips64", target_arch = "s390x")))] - __align: [::c_long; 0], - size: [u8; __SIZEOF_PTHREAD_MUTEXATTR_T], - } - - pub struct pthread_cond_t { - #[cfg(any(target_env = 
"musl"))] - __align: [*const ::c_void; 0], - #[cfg(not(any(target_env = "musl")))] - __align: [::c_longlong; 0], - size: [u8; __SIZEOF_PTHREAD_COND_T], - } - - pub struct pthread_condattr_t { - __align: [::c_int; 0], - size: [u8; __SIZEOF_PTHREAD_CONDATTR_T], - } - - pub struct passwd { - pub pw_name: *mut ::c_char, - pub pw_passwd: *mut ::c_char, - pub pw_uid: ::uid_t, - pub pw_gid: ::gid_t, - pub pw_gecos: *mut ::c_char, - pub pw_dir: *mut ::c_char, - pub pw_shell: *mut ::c_char, - } - - pub struct spwd { - pub sp_namp: *mut ::c_char, - pub sp_pwdp: *mut ::c_char, - pub sp_lstchg: ::c_long, - pub sp_min: ::c_long, - pub sp_max: ::c_long, - pub sp_warn: ::c_long, - pub sp_inact: ::c_long, - pub sp_expire: ::c_long, - pub sp_flag: ::c_ulong, - } - - pub struct statvfs { - pub f_bsize: ::c_ulong, - pub f_frsize: ::c_ulong, - pub f_blocks: ::fsblkcnt_t, - pub f_bfree: ::fsblkcnt_t, - pub f_bavail: ::fsblkcnt_t, - pub f_files: ::fsfilcnt_t, - pub f_ffree: ::fsfilcnt_t, - pub f_favail: ::fsfilcnt_t, - #[cfg(target_endian = "little")] - pub f_fsid: ::c_ulong, - #[cfg(target_pointer_width = "32")] - __f_unused: ::c_int, - #[cfg(target_endian = "big")] - pub f_fsid: ::c_ulong, - pub f_flag: ::c_ulong, - pub f_namemax: ::c_ulong, - __f_spare: [::c_int; 6], - } - - pub struct dqblk { - pub dqb_bhardlimit: ::uint64_t, - pub dqb_bsoftlimit: ::uint64_t, - pub dqb_curspace: ::uint64_t, - pub dqb_ihardlimit: ::uint64_t, - pub dqb_isoftlimit: ::uint64_t, - pub dqb_curinodes: ::uint64_t, - pub dqb_btime: ::uint64_t, - pub dqb_itime: ::uint64_t, - pub dqb_valid: ::uint32_t, - } - - pub struct signalfd_siginfo { - pub ssi_signo: ::uint32_t, - pub ssi_errno: ::int32_t, - pub ssi_code: ::int32_t, - pub ssi_pid: ::uint32_t, - pub ssi_uid: ::uint32_t, - pub ssi_fd: ::int32_t, - pub ssi_tid: ::uint32_t, - pub ssi_band: ::uint32_t, - pub ssi_overrun: ::uint32_t, - pub ssi_trapno: ::uint32_t, - pub ssi_status: ::int32_t, - pub ssi_int: ::int32_t, - pub ssi_ptr: ::uint64_t, - pub ssi_utime: 
::uint64_t, - pub ssi_stime: ::uint64_t, - pub ssi_addr: ::uint64_t, - _pad: [::uint8_t; 48], - } - - pub struct fsid_t { - __val: [::c_int; 2], - } - - pub struct mq_attr { - pub mq_flags: ::c_long, - pub mq_maxmsg: ::c_long, - pub mq_msgsize: ::c_long, - pub mq_curmsgs: ::c_long, - pad: [::c_long; 4] - } - - pub struct cpu_set_t { - #[cfg(target_pointer_width = "32")] - bits: [u32; 32], - #[cfg(target_pointer_width = "64")] - bits: [u64; 16], - } - - pub struct if_nameindex { - pub if_index: ::c_uint, - pub if_name: *mut ::c_char, - } - - // System V IPC - pub struct msginfo { - pub msgpool: ::c_int, - pub msgmap: ::c_int, - pub msgmax: ::c_int, - pub msgmnb: ::c_int, - pub msgmni: ::c_int, - pub msgssz: ::c_int, - pub msgtql: ::c_int, - pub msgseg: ::c_ushort, - } -} - -pub const ABDAY_1: ::nl_item = 0x20000; -pub const ABDAY_2: ::nl_item = 0x20001; -pub const ABDAY_3: ::nl_item = 0x20002; -pub const ABDAY_4: ::nl_item = 0x20003; -pub const ABDAY_5: ::nl_item = 0x20004; -pub const ABDAY_6: ::nl_item = 0x20005; -pub const ABDAY_7: ::nl_item = 0x20006; - -pub const DAY_1: ::nl_item = 0x20007; -pub const DAY_2: ::nl_item = 0x20008; -pub const DAY_3: ::nl_item = 0x20009; -pub const DAY_4: ::nl_item = 0x2000A; -pub const DAY_5: ::nl_item = 0x2000B; -pub const DAY_6: ::nl_item = 0x2000C; -pub const DAY_7: ::nl_item = 0x2000D; - -pub const ABMON_1: ::nl_item = 0x2000E; -pub const ABMON_2: ::nl_item = 0x2000F; -pub const ABMON_3: ::nl_item = 0x20010; -pub const ABMON_4: ::nl_item = 0x20011; -pub const ABMON_5: ::nl_item = 0x20012; -pub const ABMON_6: ::nl_item = 0x20013; -pub const ABMON_7: ::nl_item = 0x20014; -pub const ABMON_8: ::nl_item = 0x20015; -pub const ABMON_9: ::nl_item = 0x20016; -pub const ABMON_10: ::nl_item = 0x20017; -pub const ABMON_11: ::nl_item = 0x20018; -pub const ABMON_12: ::nl_item = 0x20019; - -pub const CLONE_NEWCGROUP: ::c_int = 0x02000000; - -pub const MON_1: ::nl_item = 0x2001A; -pub const MON_2: ::nl_item = 0x2001B; -pub const MON_3: 
::nl_item = 0x2001C; -pub const MON_4: ::nl_item = 0x2001D; -pub const MON_5: ::nl_item = 0x2001E; -pub const MON_6: ::nl_item = 0x2001F; -pub const MON_7: ::nl_item = 0x20020; -pub const MON_8: ::nl_item = 0x20021; -pub const MON_9: ::nl_item = 0x20022; -pub const MON_10: ::nl_item = 0x20023; -pub const MON_11: ::nl_item = 0x20024; -pub const MON_12: ::nl_item = 0x20025; - -pub const AM_STR: ::nl_item = 0x20026; -pub const PM_STR: ::nl_item = 0x20027; - -pub const D_T_FMT: ::nl_item = 0x20028; -pub const D_FMT: ::nl_item = 0x20029; -pub const T_FMT: ::nl_item = 0x2002A; -pub const T_FMT_AMPM: ::nl_item = 0x2002B; - -pub const ERA: ::nl_item = 0x2002C; -pub const ERA_D_FMT: ::nl_item = 0x2002E; -pub const ALT_DIGITS: ::nl_item = 0x2002F; -pub const ERA_D_T_FMT: ::nl_item = 0x20030; -pub const ERA_T_FMT: ::nl_item = 0x20031; - -pub const CODESET: ::nl_item = 14; - -pub const CRNCYSTR: ::nl_item = 0x4000F; - -pub const RUSAGE_THREAD: ::c_int = 1; -pub const RUSAGE_CHILDREN: ::c_int = -1; - -pub const RADIXCHAR: ::nl_item = 0x10000; -pub const THOUSEP: ::nl_item = 0x10001; - -pub const YESEXPR: ::nl_item = 0x50000; -pub const NOEXPR: ::nl_item = 0x50001; -pub const YESSTR: ::nl_item = 0x50002; -pub const NOSTR: ::nl_item = 0x50003; - -pub const FILENAME_MAX: ::c_uint = 4096; -pub const L_tmpnam: ::c_uint = 20; -pub const _PC_LINK_MAX: ::c_int = 0; -pub const _PC_MAX_CANON: ::c_int = 1; -pub const _PC_MAX_INPUT: ::c_int = 2; -pub const _PC_NAME_MAX: ::c_int = 3; -pub const _PC_PATH_MAX: ::c_int = 4; -pub const _PC_PIPE_BUF: ::c_int = 5; -pub const _PC_CHOWN_RESTRICTED: ::c_int = 6; -pub const _PC_NO_TRUNC: ::c_int = 7; -pub const _PC_VDISABLE: ::c_int = 8; - -pub const _SC_ARG_MAX: ::c_int = 0; -pub const _SC_CHILD_MAX: ::c_int = 1; -pub const _SC_CLK_TCK: ::c_int = 2; -pub const _SC_NGROUPS_MAX: ::c_int = 3; -pub const _SC_OPEN_MAX: ::c_int = 4; -pub const _SC_STREAM_MAX: ::c_int = 5; -pub const _SC_TZNAME_MAX: ::c_int = 6; -pub const _SC_JOB_CONTROL: ::c_int = 7; 
-pub const _SC_SAVED_IDS: ::c_int = 8; -pub const _SC_REALTIME_SIGNALS: ::c_int = 9; -pub const _SC_PRIORITY_SCHEDULING: ::c_int = 10; -pub const _SC_TIMERS: ::c_int = 11; -pub const _SC_ASYNCHRONOUS_IO: ::c_int = 12; -pub const _SC_PRIORITIZED_IO: ::c_int = 13; -pub const _SC_SYNCHRONIZED_IO: ::c_int = 14; -pub const _SC_FSYNC: ::c_int = 15; -pub const _SC_MAPPED_FILES: ::c_int = 16; -pub const _SC_MEMLOCK: ::c_int = 17; -pub const _SC_MEMLOCK_RANGE: ::c_int = 18; -pub const _SC_MEMORY_PROTECTION: ::c_int = 19; -pub const _SC_MESSAGE_PASSING: ::c_int = 20; -pub const _SC_SEMAPHORES: ::c_int = 21; -pub const _SC_SHARED_MEMORY_OBJECTS: ::c_int = 22; -pub const _SC_AIO_LISTIO_MAX: ::c_int = 23; -pub const _SC_AIO_MAX: ::c_int = 24; -pub const _SC_AIO_PRIO_DELTA_MAX: ::c_int = 25; -pub const _SC_DELAYTIMER_MAX: ::c_int = 26; -pub const _SC_MQ_OPEN_MAX: ::c_int = 27; -pub const _SC_MQ_PRIO_MAX: ::c_int = 28; -pub const _SC_VERSION: ::c_int = 29; -pub const _SC_PAGESIZE: ::c_int = 30; -pub const _SC_PAGE_SIZE: ::c_int = _SC_PAGESIZE; -pub const _SC_RTSIG_MAX: ::c_int = 31; -pub const _SC_SEM_NSEMS_MAX: ::c_int = 32; -pub const _SC_SEM_VALUE_MAX: ::c_int = 33; -pub const _SC_SIGQUEUE_MAX: ::c_int = 34; -pub const _SC_TIMER_MAX: ::c_int = 35; -pub const _SC_BC_BASE_MAX: ::c_int = 36; -pub const _SC_BC_DIM_MAX: ::c_int = 37; -pub const _SC_BC_SCALE_MAX: ::c_int = 38; -pub const _SC_BC_STRING_MAX: ::c_int = 39; -pub const _SC_COLL_WEIGHTS_MAX: ::c_int = 40; -pub const _SC_EXPR_NEST_MAX: ::c_int = 42; -pub const _SC_LINE_MAX: ::c_int = 43; -pub const _SC_RE_DUP_MAX: ::c_int = 44; -pub const _SC_2_VERSION: ::c_int = 46; -pub const _SC_2_C_BIND: ::c_int = 47; -pub const _SC_2_C_DEV: ::c_int = 48; -pub const _SC_2_FORT_DEV: ::c_int = 49; -pub const _SC_2_FORT_RUN: ::c_int = 50; -pub const _SC_2_SW_DEV: ::c_int = 51; -pub const _SC_2_LOCALEDEF: ::c_int = 52; -pub const _SC_IOV_MAX: ::c_int = 60; -pub const _SC_THREADS: ::c_int = 67; -pub const _SC_THREAD_SAFE_FUNCTIONS: ::c_int 
= 68; -pub const _SC_GETGR_R_SIZE_MAX: ::c_int = 69; -pub const _SC_GETPW_R_SIZE_MAX: ::c_int = 70; -pub const _SC_LOGIN_NAME_MAX: ::c_int = 71; -pub const _SC_TTY_NAME_MAX: ::c_int = 72; -pub const _SC_THREAD_DESTRUCTOR_ITERATIONS: ::c_int = 73; -pub const _SC_THREAD_KEYS_MAX: ::c_int = 74; -pub const _SC_THREAD_STACK_MIN: ::c_int = 75; -pub const _SC_THREAD_THREADS_MAX: ::c_int = 76; -pub const _SC_THREAD_ATTR_STACKADDR: ::c_int = 77; -pub const _SC_THREAD_ATTR_STACKSIZE: ::c_int = 78; -pub const _SC_THREAD_PRIORITY_SCHEDULING: ::c_int = 79; -pub const _SC_THREAD_PRIO_INHERIT: ::c_int = 80; -pub const _SC_THREAD_PRIO_PROTECT: ::c_int = 81; -pub const _SC_NPROCESSORS_ONLN: ::c_int = 84; -pub const _SC_ATEXIT_MAX: ::c_int = 87; -pub const _SC_XOPEN_VERSION: ::c_int = 89; -pub const _SC_XOPEN_XCU_VERSION: ::c_int = 90; -pub const _SC_XOPEN_UNIX: ::c_int = 91; -pub const _SC_XOPEN_CRYPT: ::c_int = 92; -pub const _SC_XOPEN_ENH_I18N: ::c_int = 93; -pub const _SC_XOPEN_SHM: ::c_int = 94; -pub const _SC_2_CHAR_TERM: ::c_int = 95; -pub const _SC_2_UPE: ::c_int = 97; -pub const _SC_XBS5_ILP32_OFF32: ::c_int = 125; -pub const _SC_XBS5_ILP32_OFFBIG: ::c_int = 126; -pub const _SC_XBS5_LPBIG_OFFBIG: ::c_int = 128; -pub const _SC_XOPEN_LEGACY: ::c_int = 129; -pub const _SC_XOPEN_REALTIME: ::c_int = 130; -pub const _SC_XOPEN_REALTIME_THREADS: ::c_int = 131; -pub const _SC_HOST_NAME_MAX: ::c_int = 180; - -pub const RLIM_SAVED_MAX: ::rlim_t = RLIM_INFINITY; -pub const RLIM_SAVED_CUR: ::rlim_t = RLIM_INFINITY; - -pub const GLOB_ERR: ::c_int = 1 << 0; -pub const GLOB_MARK: ::c_int = 1 << 1; -pub const GLOB_NOSORT: ::c_int = 1 << 2; -pub const GLOB_DOOFFS: ::c_int = 1 << 3; -pub const GLOB_NOCHECK: ::c_int = 1 << 4; -pub const GLOB_APPEND: ::c_int = 1 << 5; -pub const GLOB_NOESCAPE: ::c_int = 1 << 6; - -pub const GLOB_NOSPACE: ::c_int = 1; -pub const GLOB_ABORTED: ::c_int = 2; -pub const GLOB_NOMATCH: ::c_int = 3; - -pub const POSIX_MADV_NORMAL: ::c_int = 0; -pub const 
POSIX_MADV_RANDOM: ::c_int = 1; -pub const POSIX_MADV_SEQUENTIAL: ::c_int = 2; -pub const POSIX_MADV_WILLNEED: ::c_int = 3; - -pub const S_IEXEC: mode_t = 64; -pub const S_IWRITE: mode_t = 128; -pub const S_IREAD: mode_t = 256; - -pub const F_LOCK: ::c_int = 1; -pub const F_TEST: ::c_int = 3; -pub const F_TLOCK: ::c_int = 2; -pub const F_ULOCK: ::c_int = 0; - -pub const ST_RDONLY: ::c_ulong = 1; -pub const ST_NOSUID: ::c_ulong = 2; -pub const ST_NODEV: ::c_ulong = 4; -pub const ST_NOEXEC: ::c_ulong = 8; -pub const ST_SYNCHRONOUS: ::c_ulong = 16; -pub const ST_MANDLOCK: ::c_ulong = 64; -pub const ST_WRITE: ::c_ulong = 128; -pub const ST_APPEND: ::c_ulong = 256; -pub const ST_IMMUTABLE: ::c_ulong = 512; -pub const ST_NOATIME: ::c_ulong = 1024; -pub const ST_NODIRATIME: ::c_ulong = 2048; - -pub const RTLD_NEXT: *mut ::c_void = -1i64 as *mut ::c_void; -pub const RTLD_DEFAULT: *mut ::c_void = 0i64 as *mut ::c_void; -pub const RTLD_NODELETE: ::c_int = 0x1000; -pub const RTLD_NOW: ::c_int = 0x2; - -pub const TCP_MD5SIG: ::c_int = 14; - -pub const PTHREAD_MUTEX_INITIALIZER: pthread_mutex_t = pthread_mutex_t { - __align: [], - size: [0; __SIZEOF_PTHREAD_MUTEX_T], -}; -pub const PTHREAD_COND_INITIALIZER: pthread_cond_t = pthread_cond_t { - __align: [], - size: [0; __SIZEOF_PTHREAD_COND_T], -}; -pub const PTHREAD_RWLOCK_INITIALIZER: pthread_rwlock_t = pthread_rwlock_t { - __align: [], - size: [0; __SIZEOF_PTHREAD_RWLOCK_T], -}; -pub const PTHREAD_MUTEX_NORMAL: ::c_int = 0; -pub const PTHREAD_MUTEX_RECURSIVE: ::c_int = 1; -pub const PTHREAD_MUTEX_ERRORCHECK: ::c_int = 2; -pub const PTHREAD_MUTEX_DEFAULT: ::c_int = PTHREAD_MUTEX_NORMAL; -pub const __SIZEOF_PTHREAD_COND_T: usize = 48; - -pub const SCHED_OTHER: ::c_int = 0; -pub const SCHED_FIFO: ::c_int = 1; -pub const SCHED_RR: ::c_int = 2; -pub const SCHED_BATCH: ::c_int = 3; -pub const SCHED_IDLE: ::c_int = 5; - -// System V IPC -pub const IPC_PRIVATE: ::key_t = 0; - -pub const IPC_CREAT: ::c_int = 0o1000; -pub const 
IPC_EXCL: ::c_int = 0o2000; -pub const IPC_NOWAIT: ::c_int = 0o4000; - -pub const IPC_RMID: ::c_int = 0; -pub const IPC_SET: ::c_int = 1; -pub const IPC_STAT: ::c_int = 2; -pub const IPC_INFO: ::c_int = 3; -pub const MSG_STAT: ::c_int = 11; -pub const MSG_INFO: ::c_int = 12; - -pub const MSG_NOERROR: ::c_int = 0o10000; -pub const MSG_EXCEPT: ::c_int = 0o20000; -pub const MSG_COPY: ::c_int = 0o40000; - -pub const SHM_R: ::c_int = 0o400; -pub const SHM_W: ::c_int = 0o200; - -pub const SHM_RDONLY: ::c_int = 0o10000; -pub const SHM_RND: ::c_int = 0o20000; -pub const SHM_REMAP: ::c_int = 0o40000; -pub const SHM_EXEC: ::c_int = 0o100000; - -pub const SHM_LOCK: ::c_int = 11; -pub const SHM_UNLOCK: ::c_int = 12; - -pub const SHM_HUGETLB: ::c_int = 0o4000; -pub const SHM_NORESERVE: ::c_int = 0o10000; - -pub const EPOLLRDHUP: ::c_int = 0x2000; -pub const EPOLLONESHOT: ::c_int = 0x40000000; - -pub const QFMT_VFS_OLD: ::c_int = 1; -pub const QFMT_VFS_V0: ::c_int = 2; - -pub const SFD_CLOEXEC: ::c_int = 0x080000; - -pub const EFD_SEMAPHORE: ::c_int = 0x1; - -pub const NCCS: usize = 32; - -pub const LOG_NFACILITIES: ::c_int = 24; - -pub const SEM_FAILED: *mut ::sem_t = 0 as *mut sem_t; - -pub const RB_AUTOBOOT: ::c_int = 0x01234567u32 as i32; -pub const RB_HALT_SYSTEM: ::c_int = 0xcdef0123u32 as i32; -pub const RB_ENABLE_CAD: ::c_int = 0x89abcdefu32 as i32; -pub const RB_DISABLE_CAD: ::c_int = 0x00000000u32 as i32; -pub const RB_POWER_OFF: ::c_int = 0x4321fedcu32 as i32; -pub const RB_SW_SUSPEND: ::c_int = 0xd000fce2u32 as i32; -pub const RB_KEXEC: ::c_int = 0x45584543u32 as i32; - -pub const SYNC_FILE_RANGE_WAIT_BEFORE: ::c_uint = 1; -pub const SYNC_FILE_RANGE_WRITE: ::c_uint = 2; -pub const SYNC_FILE_RANGE_WAIT_AFTER: ::c_uint = 4; - -pub const EAI_SYSTEM: ::c_int = -11; - -pub const AIO_CANCELED: ::c_int = 0; -pub const AIO_NOTCANCELED: ::c_int = 1; -pub const AIO_ALLDONE: ::c_int = 2; -pub const LIO_READ: ::c_int = 0; -pub const LIO_WRITE: ::c_int = 1; -pub const LIO_NOP: 
::c_int = 2; -pub const LIO_WAIT: ::c_int = 0; -pub const LIO_NOWAIT: ::c_int = 1; - -f! { - pub fn CPU_ZERO(cpuset: &mut cpu_set_t) -> () { - for slot in cpuset.bits.iter_mut() { - *slot = 0; - } - } - - pub fn CPU_SET(cpu: usize, cpuset: &mut cpu_set_t) -> () { - let size_in_bits = 8 * mem::size_of_val(&cpuset.bits[0]); // 32, 64 etc - let (idx, offset) = (cpu / size_in_bits, cpu % size_in_bits); - cpuset.bits[idx] |= 1 << offset; - () - } - - pub fn CPU_CLR(cpu: usize, cpuset: &mut cpu_set_t) -> () { - let size_in_bits = 8 * mem::size_of_val(&cpuset.bits[0]); // 32, 64 etc - let (idx, offset) = (cpu / size_in_bits, cpu % size_in_bits); - cpuset.bits[idx] &= !(1 << offset); - () - } - - pub fn CPU_ISSET(cpu: usize, cpuset: &cpu_set_t) -> bool { - let size_in_bits = 8 * mem::size_of_val(&cpuset.bits[0]); - let (idx, offset) = (cpu / size_in_bits, cpu % size_in_bits); - 0 != (cpuset.bits[idx] & (1 << offset)) - } - - pub fn CPU_EQUAL(set1: &cpu_set_t, set2: &cpu_set_t) -> bool { - set1.bits == set2.bits - } -} - -extern { - pub fn aio_read(aiocbp: *mut aiocb) -> ::c_int; - pub fn aio_write(aiocbp: *mut aiocb) -> ::c_int; - pub fn aio_fsync(op: ::c_int, aiocbp: *mut aiocb) -> ::c_int; - pub fn aio_error(aiocbp: *const aiocb) -> ::c_int; - pub fn aio_return(aiocbp: *mut aiocb) -> ::ssize_t; - pub fn aio_suspend(aiocb_list: *const *const aiocb, nitems: ::c_int, - timeout: *const ::timespec) -> ::c_int; - pub fn aio_cancel(fd: ::c_int, aiocbp: *mut aiocb) -> ::c_int; - pub fn lio_listio(mode: ::c_int, aiocb_list: *const *mut aiocb, - nitems: ::c_int, sevp: *mut ::sigevent) -> ::c_int; - - pub fn lutimes(file: *const ::c_char, times: *const ::timeval) -> ::c_int; - - pub fn setpwent(); - pub fn getpwent() -> *mut passwd; - pub fn setspent(); - pub fn endspent(); - pub fn getspent() -> *mut spwd; - pub fn getspnam(__name: *const ::c_char) -> *mut spwd; - - pub fn shm_open(name: *const c_char, oflag: ::c_int, - mode: mode_t) -> ::c_int; - - // System V IPC - pub fn 
shmget(key: ::key_t, size: ::size_t, shmflg: ::c_int) -> ::c_int; - pub fn shmat(shmid: ::c_int, - shmaddr: *const ::c_void, - shmflg: ::c_int) -> *mut ::c_void; - pub fn shmdt(shmaddr: *const ::c_void) -> ::c_int; - pub fn shmctl(shmid: ::c_int, - cmd: ::c_int, - buf: *mut ::shmid_ds) -> ::c_int; - pub fn ftok(pathname: *const ::c_char, proj_id: ::c_int) -> ::key_t; - pub fn msgctl(msqid: ::c_int, cmd: ::c_int, buf: *mut msqid_ds) -> ::c_int; - pub fn msgget(key: ::key_t, msgflg: ::c_int) -> ::c_int; - pub fn msgrcv(msqid: ::c_int, msgp: *mut ::c_void, msgsz: ::size_t, - msgtyp: ::c_long, msgflg: ::c_int) -> ::ssize_t; - pub fn msgsnd(msqid: ::c_int, msgp: *const ::c_void, msgsz: ::size_t, - msgflg: ::c_int) -> ::c_int; - - pub fn mprotect(addr: *mut ::c_void, len: ::size_t, prot: ::c_int) - -> ::c_int; - pub fn __errno_location() -> *mut ::c_int; - - pub fn fopen64(filename: *const c_char, - mode: *const c_char) -> *mut ::FILE; - pub fn freopen64(filename: *const c_char, mode: *const c_char, - file: *mut ::FILE) -> *mut ::FILE; - pub fn tmpfile64() -> *mut ::FILE; - pub fn fgetpos64(stream: *mut ::FILE, ptr: *mut fpos64_t) -> ::c_int; - pub fn fsetpos64(stream: *mut ::FILE, ptr: *const fpos64_t) -> ::c_int; - pub fn fseeko64(stream: *mut ::FILE, - offset: ::off64_t, - whence: ::c_int) -> ::c_int; - pub fn ftello64(stream: *mut ::FILE) -> ::off64_t; - pub fn fallocate(fd: ::c_int, mode: ::c_int, - offset: ::off_t, len: ::off_t) -> ::c_int; - pub fn posix_fallocate(fd: ::c_int, offset: ::off_t, - len: ::off_t) -> ::c_int; - pub fn readahead(fd: ::c_int, offset: ::off64_t, - count: ::size_t) -> ::ssize_t; - pub fn getxattr(path: *const c_char, name: *const c_char, - value: *mut ::c_void, size: ::size_t) -> ::ssize_t; - pub fn lgetxattr(path: *const c_char, name: *const c_char, - value: *mut ::c_void, size: ::size_t) -> ::ssize_t; - pub fn fgetxattr(filedes: ::c_int, name: *const c_char, - value: *mut ::c_void, size: ::size_t) -> ::ssize_t; - pub fn setxattr(path: 
*const c_char, name: *const c_char, - value: *const ::c_void, size: ::size_t, - flags: ::c_int) -> ::c_int; - pub fn lsetxattr(path: *const c_char, name: *const c_char, - value: *const ::c_void, size: ::size_t, - flags: ::c_int) -> ::c_int; - pub fn fsetxattr(filedes: ::c_int, name: *const c_char, - value: *const ::c_void, size: ::size_t, - flags: ::c_int) -> ::c_int; - pub fn listxattr(path: *const c_char, list: *mut c_char, - size: ::size_t) -> ::ssize_t; - pub fn llistxattr(path: *const c_char, list: *mut c_char, - size: ::size_t) -> ::ssize_t; - pub fn flistxattr(filedes: ::c_int, list: *mut c_char, - size: ::size_t) -> ::ssize_t; - pub fn removexattr(path: *const c_char, name: *const c_char) -> ::c_int; - pub fn lremovexattr(path: *const c_char, name: *const c_char) -> ::c_int; - pub fn fremovexattr(filedes: ::c_int, name: *const c_char) -> ::c_int; - pub fn signalfd(fd: ::c_int, - mask: *const ::sigset_t, - flags: ::c_int) -> ::c_int; - pub fn pwritev(fd: ::c_int, - iov: *const ::iovec, - iovcnt: ::c_int, - offset: ::off_t) -> ::ssize_t; - pub fn preadv(fd: ::c_int, - iov: *const ::iovec, - iovcnt: ::c_int, - offset: ::off_t) -> ::ssize_t; - pub fn quotactl(cmd: ::c_int, - special: *const ::c_char, - id: ::c_int, - data: *mut ::c_char) -> ::c_int; - pub fn mq_open(name: *const ::c_char, oflag: ::c_int, ...) 
-> ::mqd_t; - pub fn mq_close(mqd: ::mqd_t) -> ::c_int; - pub fn mq_unlink(name: *const ::c_char) -> ::c_int; - pub fn mq_receive(mqd: ::mqd_t, - msg_ptr: *mut ::c_char, - msg_len: ::size_t, - msq_prio: *mut ::c_uint) -> ::ssize_t; - pub fn mq_send(mqd: ::mqd_t, - msg_ptr: *const ::c_char, - msg_len: ::size_t, - msq_prio: ::c_uint) -> ::c_int; - pub fn mq_getattr(mqd: ::mqd_t, attr: *mut ::mq_attr) -> ::c_int; - pub fn mq_setattr(mqd: ::mqd_t, - newattr: *const ::mq_attr, - oldattr: *mut ::mq_attr) -> ::c_int; - pub fn epoll_pwait(epfd: ::c_int, - events: *mut ::epoll_event, - maxevents: ::c_int, - timeout: ::c_int, - sigmask: *const ::sigset_t) -> ::c_int; - pub fn dup3(oldfd: ::c_int, newfd: ::c_int, flags: ::c_int) -> ::c_int; - pub fn sethostname(name: *const ::c_char, len: ::size_t) -> ::c_int; - pub fn mkostemp(template: *mut ::c_char, flags: ::c_int) -> ::c_int; - pub fn mkostemps(template: *mut ::c_char, - suffixlen: ::c_int, - flags: ::c_int) -> ::c_int; - pub fn sigtimedwait(set: *const sigset_t, - info: *mut siginfo_t, - timeout: *const ::timespec) -> ::c_int; - pub fn sigwaitinfo(set: *const sigset_t, - info: *mut siginfo_t) -> ::c_int; - pub fn openpty(amaster: *mut ::c_int, - aslave: *mut ::c_int, - name: *mut ::c_char, - termp: *const termios, - winp: *const ::winsize) -> ::c_int; - pub fn forkpty(amaster: *mut ::c_int, - name: *mut ::c_char, - termp: *const termios, - winp: *const ::winsize) -> ::pid_t; - pub fn nl_langinfo_l(item: ::nl_item, locale: ::locale_t) -> *mut ::c_char; - pub fn getnameinfo(sa: *const ::sockaddr, - salen: ::socklen_t, - host: *mut ::c_char, - hostlen: ::socklen_t, - serv: *mut ::c_char, - sevlen: ::socklen_t, - flags: ::c_int) -> ::c_int; - pub fn prlimit(pid: ::pid_t, resource: ::c_int, new_limit: *const ::rlimit, - old_limit: *mut ::rlimit) -> ::c_int; - pub fn prlimit64(pid: ::pid_t, - resource: ::c_int, - new_limit: *const ::rlimit64, - old_limit: *mut ::rlimit64) -> ::c_int; - pub fn getloadavg(loadavg: *mut 
::c_double, nelem: ::c_int) -> ::c_int; - pub fn process_vm_readv(pid: ::pid_t, - local_iov: *const ::iovec, - liovcnt: ::c_ulong, - remote_iov: *const ::iovec, - riovcnt: ::c_ulong, - flags: ::c_ulong) -> isize; - pub fn process_vm_writev(pid: ::pid_t, - local_iov: *const ::iovec, - liovcnt: ::c_ulong, - remote_iov: *const ::iovec, - riovcnt: ::c_ulong, - flags: ::c_ulong) -> isize; - pub fn reboot(how_to: ::c_int) -> ::c_int; - pub fn setfsgid(gid: ::gid_t) -> ::c_int; - pub fn setfsuid(uid: ::uid_t) -> ::c_int; - pub fn setresgid(rgid: ::gid_t, egid: ::gid_t, sgid: ::gid_t) -> ::c_int; - pub fn setresuid(ruid: ::uid_t, euid: ::uid_t, suid: ::uid_t) -> ::c_int; - - // Not available now on Android - pub fn mkfifoat(dirfd: ::c_int, pathname: *const ::c_char, - mode: ::mode_t) -> ::c_int; - pub fn if_nameindex() -> *mut if_nameindex; - pub fn if_freenameindex(ptr: *mut if_nameindex); - pub fn sync_file_range(fd: ::c_int, offset: ::off64_t, - nbytes: ::off64_t, flags: ::c_uint) -> ::c_int; - pub fn getifaddrs(ifap: *mut *mut ::ifaddrs) -> ::c_int; - pub fn freeifaddrs(ifa: *mut ::ifaddrs); -} - -cfg_if! { - if #[cfg(any(target_env = "musl", - target_os = "fuchsia", - target_os = "emscripten"))] { - mod musl; - pub use self::musl::*; - } else if #[cfg(any(target_arch = "mips", - target_arch = "mips64"))] { - mod mips; - pub use self::mips::*; - } else if #[cfg(any(target_arch = "s390x"))] { - mod s390x; - pub use self::s390x::*; - } else { - mod other; - pub use self::other::*; - } -} diff -Nru cargo-0.17.0/vendor/libc-0.2.18/src/unix/notbsd/linux/musl/b32/arm.rs cargo-0.19.0/vendor/libc-0.2.18/src/unix/notbsd/linux/musl/b32/arm.rs --- cargo-0.17.0/vendor/libc-0.2.18/src/unix/notbsd/linux/musl/b32/arm.rs 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/libc-0.2.18/src/unix/notbsd/linux/musl/b32/arm.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,337 +0,0 @@ -pub type c_char = u8; -pub type wchar_t = u32; - -s! 
{ - pub struct stat { - pub st_dev: ::dev_t, - __st_dev_padding: ::c_int, - __st_ino_truncated: ::c_long, - pub st_mode: ::mode_t, - pub st_nlink: ::nlink_t, - pub st_uid: ::uid_t, - pub st_gid: ::gid_t, - pub st_rdev: ::dev_t, - __st_rdev_padding: ::c_int, - pub st_size: ::off_t, - pub st_blksize: ::blksize_t, - pub st_blocks: ::blkcnt_t, - pub st_atime: ::time_t, - pub st_atime_nsec: ::c_long, - pub st_mtime: ::time_t, - pub st_mtime_nsec: ::c_long, - pub st_ctime: ::time_t, - pub st_ctime_nsec: ::c_long, - pub st_ino: ::ino_t, - } - - pub struct stat64 { - pub st_dev: ::dev_t, - __st_dev_padding: ::c_int, - __st_ino_truncated: ::c_long, - pub st_mode: ::mode_t, - pub st_nlink: ::nlink_t, - pub st_uid: ::uid_t, - pub st_gid: ::gid_t, - pub st_rdev: ::dev_t, - __st_rdev_padding: ::c_int, - pub st_size: ::off_t, - pub st_blksize: ::blksize_t, - pub st_blocks: ::blkcnt_t, - pub st_atime: ::time_t, - pub st_atime_nsec: ::c_long, - pub st_mtime: ::time_t, - pub st_mtime_nsec: ::c_long, - pub st_ctime: ::time_t, - pub st_ctime_nsec: ::c_long, - pub st_ino: ::ino_t, - } - - pub struct stack_t { - pub ss_sp: *mut ::c_void, - pub ss_flags: ::c_int, - pub ss_size: ::size_t - } - - pub struct shmid_ds { - pub shm_perm: ::ipc_perm, - pub shm_segsz: ::size_t, - pub shm_atime: ::time_t, - __unused1: ::c_int, - pub shm_dtime: ::time_t, - __unused2: ::c_int, - pub shm_ctime: ::time_t, - __unused3: ::c_int, - pub shm_cpid: ::pid_t, - pub shm_lpid: ::pid_t, - pub shm_nattch: ::c_ulong, - __pad1: ::c_ulong, - __pad2: ::c_ulong, - } - - pub struct msqid_ds { - pub msg_perm: ::ipc_perm, - pub msg_stime: ::time_t, - __unused1: ::c_int, - pub msg_rtime: ::time_t, - __unused2: ::c_int, - pub msg_ctime: ::time_t, - __unused3: ::c_int, - __msg_cbytes: ::c_ulong, - pub msg_qnum: ::msgqnum_t, - pub msg_qbytes: ::msglen_t, - pub msg_lspid: ::pid_t, - pub msg_lrpid: ::pid_t, - __pad1: ::c_ulong, - __pad2: ::c_ulong, - } - - pub struct statfs { - pub f_type: ::c_ulong, - pub f_bsize: 
::c_ulong, - pub f_blocks: ::fsblkcnt_t, - pub f_bfree: ::fsblkcnt_t, - pub f_bavail: ::fsblkcnt_t, - pub f_files: ::fsfilcnt_t, - pub f_ffree: ::fsfilcnt_t, - pub f_fsid: ::fsid_t, - pub f_namelen: ::c_ulong, - pub f_frsize: ::c_ulong, - pub f_flags: ::c_ulong, - pub f_spare: [::c_ulong; 4], - } - - pub struct siginfo_t { - pub si_signo: ::c_int, - pub si_errno: ::c_int, - pub si_code: ::c_int, - pub _pad: [::c_int; 29], - _align: [usize; 0], - } -} - -pub const O_DIRECT: ::c_int = 0x4000; -pub const O_DIRECTORY: ::c_int = 0x10000; -pub const O_NOFOLLOW: ::c_int = 0x20000; -pub const O_ASYNC: ::c_int = 0x2000; - -pub const FIOCLEX: ::c_int = 0x5451; -pub const FIONBIO: ::c_int = 0x5421; - -pub const RLIMIT_RSS: ::c_int = 5; -pub const RLIMIT_NOFILE: ::c_int = 7; -pub const RLIMIT_AS: ::c_int = 9; -pub const RLIMIT_NPROC: ::c_int = 6; -pub const RLIMIT_MEMLOCK: ::c_int = 8; - -pub const O_APPEND: ::c_int = 1024; -pub const O_CREAT: ::c_int = 64; -pub const O_EXCL: ::c_int = 128; -pub const O_NOCTTY: ::c_int = 256; -pub const O_NONBLOCK: ::c_int = 2048; -pub const O_SYNC: ::c_int = 1052672; -pub const O_RSYNC: ::c_int = 1052672; -pub const O_DSYNC: ::c_int = 4096; - -pub const SOCK_NONBLOCK: ::c_int = 2048; - -pub const MAP_ANON: ::c_int = 0x0020; -pub const MAP_GROWSDOWN: ::c_int = 0x0100; -pub const MAP_DENYWRITE: ::c_int = 0x0800; -pub const MAP_EXECUTABLE: ::c_int = 0x01000; -pub const MAP_LOCKED: ::c_int = 0x02000; -pub const MAP_NORESERVE: ::c_int = 0x04000; -pub const MAP_POPULATE: ::c_int = 0x08000; -pub const MAP_NONBLOCK: ::c_int = 0x010000; -pub const MAP_STACK: ::c_int = 0x020000; - -pub const SOCK_STREAM: ::c_int = 1; -pub const SOCK_DGRAM: ::c_int = 2; -pub const SOCK_SEQPACKET: ::c_int = 5; - -pub const SOL_SOCKET: ::c_int = 1; - -pub const EDEADLK: ::c_int = 35; -pub const ENAMETOOLONG: ::c_int = 36; -pub const ENOLCK: ::c_int = 37; -pub const ENOSYS: ::c_int = 38; -pub const ENOTEMPTY: ::c_int = 39; -pub const ELOOP: ::c_int = 40; -pub const ENOMSG: 
::c_int = 42; -pub const EIDRM: ::c_int = 43; -pub const ECHRNG: ::c_int = 44; -pub const EL2NSYNC: ::c_int = 45; -pub const EL3HLT: ::c_int = 46; -pub const EL3RST: ::c_int = 47; -pub const ELNRNG: ::c_int = 48; -pub const EUNATCH: ::c_int = 49; -pub const ENOCSI: ::c_int = 50; -pub const EL2HLT: ::c_int = 51; -pub const EBADE: ::c_int = 52; -pub const EBADR: ::c_int = 53; -pub const EXFULL: ::c_int = 54; -pub const ENOANO: ::c_int = 55; -pub const EBADRQC: ::c_int = 56; -pub const EBADSLT: ::c_int = 57; -pub const EDEADLOCK: ::c_int = EDEADLK; -pub const EMULTIHOP: ::c_int = 72; -pub const EBADMSG: ::c_int = 74; -pub const EOVERFLOW: ::c_int = 75; -pub const ENOTUNIQ: ::c_int = 76; -pub const EBADFD: ::c_int = 77; -pub const EREMCHG: ::c_int = 78; -pub const ELIBACC: ::c_int = 79; -pub const ELIBBAD: ::c_int = 80; -pub const ELIBSCN: ::c_int = 81; -pub const ELIBMAX: ::c_int = 82; -pub const ELIBEXEC: ::c_int = 83; -pub const EILSEQ: ::c_int = 84; -pub const ERESTART: ::c_int = 85; -pub const ESTRPIPE: ::c_int = 86; -pub const EUSERS: ::c_int = 87; -pub const ENOTSOCK: ::c_int = 88; -pub const EDESTADDRREQ: ::c_int = 89; -pub const EMSGSIZE: ::c_int = 90; -pub const EPROTOTYPE: ::c_int = 91; -pub const ENOPROTOOPT: ::c_int = 92; -pub const EPROTONOSUPPORT: ::c_int = 93; -pub const ESOCKTNOSUPPORT: ::c_int = 94; -pub const EOPNOTSUPP: ::c_int = 95; -pub const ENOTSUP: ::c_int = EOPNOTSUPP; -pub const EPFNOSUPPORT: ::c_int = 96; -pub const EAFNOSUPPORT: ::c_int = 97; -pub const EADDRINUSE: ::c_int = 98; -pub const EADDRNOTAVAIL: ::c_int = 99; -pub const ENETDOWN: ::c_int = 100; -pub const ENETUNREACH: ::c_int = 101; -pub const ENETRESET: ::c_int = 102; -pub const ECONNABORTED: ::c_int = 103; -pub const ECONNRESET: ::c_int = 104; -pub const ENOBUFS: ::c_int = 105; -pub const EISCONN: ::c_int = 106; -pub const ENOTCONN: ::c_int = 107; -pub const ESHUTDOWN: ::c_int = 108; -pub const ETOOMANYREFS: ::c_int = 109; -pub const ETIMEDOUT: ::c_int = 110; -pub const 
ECONNREFUSED: ::c_int = 111; -pub const EHOSTDOWN: ::c_int = 112; -pub const EHOSTUNREACH: ::c_int = 113; -pub const EALREADY: ::c_int = 114; -pub const EINPROGRESS: ::c_int = 115; -pub const ESTALE: ::c_int = 116; -pub const EUCLEAN: ::c_int = 117; -pub const ENOTNAM: ::c_int = 118; -pub const ENAVAIL: ::c_int = 119; -pub const EISNAM: ::c_int = 120; -pub const EREMOTEIO: ::c_int = 121; -pub const EDQUOT: ::c_int = 122; -pub const ENOMEDIUM: ::c_int = 123; -pub const EMEDIUMTYPE: ::c_int = 124; -pub const ECANCELED: ::c_int = 125; -pub const ENOKEY: ::c_int = 126; -pub const EKEYEXPIRED: ::c_int = 127; -pub const EKEYREVOKED: ::c_int = 128; -pub const EKEYREJECTED: ::c_int = 129; -pub const EOWNERDEAD: ::c_int = 130; -pub const ENOTRECOVERABLE: ::c_int = 131; -pub const ERFKILL: ::c_int = 132; -pub const EHWPOISON: ::c_int = 133; - -pub const SO_REUSEADDR: ::c_int = 2; -pub const SO_TYPE: ::c_int = 3; -pub const SO_ERROR: ::c_int = 4; -pub const SO_DONTROUTE: ::c_int = 5; -pub const SO_BROADCAST: ::c_int = 6; -pub const SO_SNDBUF: ::c_int = 7; -pub const SO_RCVBUF: ::c_int = 8; -pub const SO_KEEPALIVE: ::c_int = 9; -pub const SO_OOBINLINE: ::c_int = 10; -pub const SO_LINGER: ::c_int = 13; -pub const SO_REUSEPORT: ::c_int = 15; -pub const SO_RCVLOWAT: ::c_int = 18; -pub const SO_SNDLOWAT: ::c_int = 19; -pub const SO_RCVTIMEO: ::c_int = 20; -pub const SO_SNDTIMEO: ::c_int = 21; -pub const SO_ACCEPTCONN: ::c_int = 30; - -pub const SA_ONSTACK: ::c_int = 0x08000000; -pub const SA_SIGINFO: ::c_int = 0x00000004; -pub const SA_NOCLDWAIT: ::c_int = 0x00000002; - -pub const SIGCHLD: ::c_int = 17; -pub const SIGBUS: ::c_int = 7; -pub const SIGTTIN: ::c_int = 21; -pub const SIGTTOU: ::c_int = 22; -pub const SIGXCPU: ::c_int = 24; -pub const SIGXFSZ: ::c_int = 25; -pub const SIGVTALRM: ::c_int = 26; -pub const SIGPROF: ::c_int = 27; -pub const SIGWINCH: ::c_int = 28; -pub const SIGUSR1: ::c_int = 10; -pub const SIGUSR2: ::c_int = 12; -pub const SIGCONT: ::c_int = 18; -pub 
const SIGSTOP: ::c_int = 19; -pub const SIGTSTP: ::c_int = 20; -pub const SIGURG: ::c_int = 23; -pub const SIGIO: ::c_int = 29; -pub const SIGSYS: ::c_int = 31; -pub const SIGSTKFLT: ::c_int = 16; -pub const SIGPOLL: ::c_int = 29; -pub const SIGPWR: ::c_int = 30; -pub const SIG_SETMASK: ::c_int = 2; -pub const SIG_BLOCK: ::c_int = 0x000000; -pub const SIG_UNBLOCK: ::c_int = 0x01; - -pub const EXTPROC: ::tcflag_t = 0x00010000; - -pub const MAP_HUGETLB: ::c_int = 0x040000; - -pub const F_GETLK: ::c_int = 12; -pub const F_GETOWN: ::c_int = 9; -pub const F_SETLK: ::c_int = 13; -pub const F_SETLKW: ::c_int = 14; -pub const F_SETOWN: ::c_int = 8; - -pub const VEOF: usize = 4; -pub const VEOL: usize = 11; -pub const VEOL2: usize = 16; -pub const VMIN: usize = 6; -pub const IEXTEN: ::tcflag_t = 0x00008000; -pub const TOSTOP: ::tcflag_t = 0x00000100; -pub const FLUSHO: ::tcflag_t = 0x00001000; - -pub const TCGETS: ::c_int = 0x5401; -pub const TCSETS: ::c_int = 0x5402; -pub const TCSETSW: ::c_int = 0x5403; -pub const TCSETSF: ::c_int = 0x5404; -pub const TCGETA: ::c_int = 0x5405; -pub const TCSETA: ::c_int = 0x5406; -pub const TCSETAW: ::c_int = 0x5407; -pub const TCSETAF: ::c_int = 0x5408; -pub const TCSBRK: ::c_int = 0x5409; -pub const TCXONC: ::c_int = 0x540A; -pub const TCFLSH: ::c_int = 0x540B; -pub const TIOCGSOFTCAR: ::c_int = 0x5419; -pub const TIOCSSOFTCAR: ::c_int = 0x541A; -pub const TIOCLINUX: ::c_int = 0x541C; -pub const TIOCGSERIAL: ::c_int = 0x541E; -pub const TIOCEXCL: ::c_int = 0x540C; -pub const TIOCNXCL: ::c_int = 0x540D; -pub const TIOCSCTTY: ::c_int = 0x540E; -pub const TIOCGPGRP: ::c_int = 0x540F; -pub const TIOCSPGRP: ::c_int = 0x5410; -pub const TIOCOUTQ: ::c_int = 0x5411; -pub const TIOCSTI: ::c_int = 0x5412; -pub const TIOCGWINSZ: ::c_int = 0x5413; -pub const TIOCSWINSZ: ::c_int = 0x5414; -pub const TIOCMGET: ::c_int = 0x5415; -pub const TIOCMBIS: ::c_int = 0x5416; -pub const TIOCMBIC: ::c_int = 0x5417; -pub const TIOCMSET: ::c_int = 0x5418; -pub 
const FIONREAD: ::c_int = 0x541B; -pub const TIOCCONS: ::c_int = 0x541D; - -pub const SYS_gettid: ::c_long = 224; -pub const SYS_perf_event_open: ::c_long = 364; - -pub const POLLWRNORM: ::c_short = 0x100; -pub const POLLWRBAND: ::c_short = 0x200; diff -Nru cargo-0.17.0/vendor/libc-0.2.18/src/unix/notbsd/linux/musl/b32/asmjs.rs cargo-0.19.0/vendor/libc-0.2.18/src/unix/notbsd/linux/musl/b32/asmjs.rs --- cargo-0.17.0/vendor/libc-0.2.18/src/unix/notbsd/linux/musl/b32/asmjs.rs 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/libc-0.2.18/src/unix/notbsd/linux/musl/b32/asmjs.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,336 +0,0 @@ -pub type c_char = u8; -pub type wchar_t = u32; - -s! { - pub struct stat { - pub st_dev: ::dev_t, - __st_dev_padding: ::c_int, - __st_ino_truncated: ::c_long, - pub st_mode: ::mode_t, - pub st_nlink: ::nlink_t, - pub st_uid: ::uid_t, - pub st_gid: ::gid_t, - pub st_rdev: ::dev_t, - __st_rdev_padding: ::c_int, - pub st_size: ::off_t, - pub st_blksize: ::blksize_t, - pub st_blocks: ::blkcnt_t, - pub st_atime: ::time_t, - pub st_atime_nsec: ::c_long, - pub st_mtime: ::time_t, - pub st_mtime_nsec: ::c_long, - pub st_ctime: ::time_t, - pub st_ctime_nsec: ::c_long, - pub st_ino: ::ino_t, - } - - pub struct stat64 { - pub st_dev: ::dev_t, - __st_dev_padding: ::c_int, - __st_ino_truncated: ::c_long, - pub st_mode: ::mode_t, - pub st_nlink: ::nlink_t, - pub st_uid: ::uid_t, - pub st_gid: ::gid_t, - pub st_rdev: ::dev_t, - __st_rdev_padding: ::c_int, - pub st_size: ::off_t, - pub st_blksize: ::blksize_t, - pub st_blocks: ::blkcnt_t, - pub st_atime: ::time_t, - pub st_atime_nsec: ::c_long, - pub st_mtime: ::time_t, - pub st_mtime_nsec: ::c_long, - pub st_ctime: ::time_t, - pub st_ctime_nsec: ::c_long, - pub st_ino: ::ino_t, - } - - pub struct stack_t { - pub ss_sp: *mut ::c_void, - pub ss_flags: ::c_int, - pub ss_size: ::size_t - } - - pub struct shmid_ds { - pub shm_perm: ::ipc_perm, - pub shm_segsz: ::size_t, - pub shm_atime: ::time_t, - 
__unused1: ::c_int, - pub shm_dtime: ::time_t, - __unused2: ::c_int, - pub shm_ctime: ::time_t, - __unused3: ::c_int, - pub shm_cpid: ::pid_t, - pub shm_lpid: ::pid_t, - pub shm_nattch: ::c_ulong, - __pad1: ::c_ulong, - __pad2: ::c_ulong, - } - - pub struct msqid_ds { - pub msg_perm: ::ipc_perm, - pub msg_stime: ::time_t, - __unused1: ::c_int, - pub msg_rtime: ::time_t, - __unused2: ::c_int, - pub msg_ctime: ::time_t, - __unused3: ::c_int, - __msg_cbytes: ::c_ulong, - pub msg_qnum: ::msgqnum_t, - pub msg_qbytes: ::msglen_t, - pub msg_lspid: ::pid_t, - pub msg_lrpid: ::pid_t, - __pad1: ::c_ulong, - __pad2: ::c_ulong, - } - - pub struct statfs { - pub f_type: ::c_ulong, - pub f_bsize: ::c_ulong, - pub f_blocks: ::fsblkcnt_t, - pub f_bfree: ::fsblkcnt_t, - pub f_bavail: ::fsblkcnt_t, - pub f_files: ::fsfilcnt_t, - pub f_ffree: ::fsfilcnt_t, - pub f_fsid: ::fsid_t, - pub f_namelen: ::c_ulong, - pub f_frsize: ::c_ulong, - pub f_flags: ::c_ulong, - pub f_spare: [::c_ulong; 4], - } - - pub struct siginfo_t { - pub si_signo: ::c_int, - pub si_errno: ::c_int, - pub si_code: ::c_int, - pub _pad: [::c_int; 29], - _align: [usize; 0], - } -} - -pub const O_DIRECT: ::c_int = 0x4000; -pub const O_DIRECTORY: ::c_int = 0x10000; -pub const O_NOFOLLOW: ::c_int = 0x20000; -pub const O_ASYNC: ::c_int = 0x2000; - -pub const FIOCLEX: ::c_int = 0x5451; -pub const FIONBIO: ::c_int = 0x5421; - -pub const RLIMIT_RSS: ::c_int = 5; -pub const RLIMIT_NOFILE: ::c_int = 7; -pub const RLIMIT_AS: ::c_int = 9; -pub const RLIMIT_NPROC: ::c_int = 6; -pub const RLIMIT_MEMLOCK: ::c_int = 8; - -pub const O_APPEND: ::c_int = 1024; -pub const O_CREAT: ::c_int = 64; -pub const O_EXCL: ::c_int = 128; -pub const O_NOCTTY: ::c_int = 256; -pub const O_NONBLOCK: ::c_int = 2048; -pub const O_SYNC: ::c_int = 1052672; -pub const O_RSYNC: ::c_int = 1052672; -pub const O_DSYNC: ::c_int = 4096; - -pub const SOCK_NONBLOCK: ::c_int = 2048; - -pub const MAP_ANON: ::c_int = 0x0020; -pub const MAP_GROWSDOWN: ::c_int = 
0x0100; -pub const MAP_DENYWRITE: ::c_int = 0x0800; -pub const MAP_EXECUTABLE: ::c_int = 0x01000; -pub const MAP_LOCKED: ::c_int = 0x02000; -pub const MAP_NORESERVE: ::c_int = 0x04000; -pub const MAP_POPULATE: ::c_int = 0x08000; -pub const MAP_NONBLOCK: ::c_int = 0x010000; -pub const MAP_STACK: ::c_int = 0x020000; - -pub const SOCK_STREAM: ::c_int = 1; -pub const SOCK_DGRAM: ::c_int = 2; -pub const SOCK_SEQPACKET: ::c_int = 5; - -pub const SOL_SOCKET: ::c_int = 1; - -pub const EDEADLK: ::c_int = 35; -pub const ENAMETOOLONG: ::c_int = 36; -pub const ENOLCK: ::c_int = 37; -pub const ENOSYS: ::c_int = 38; -pub const ENOTEMPTY: ::c_int = 39; -pub const ELOOP: ::c_int = 40; -pub const ENOMSG: ::c_int = 42; -pub const EIDRM: ::c_int = 43; -pub const ECHRNG: ::c_int = 44; -pub const EL2NSYNC: ::c_int = 45; -pub const EL3HLT: ::c_int = 46; -pub const EL3RST: ::c_int = 47; -pub const ELNRNG: ::c_int = 48; -pub const EUNATCH: ::c_int = 49; -pub const ENOCSI: ::c_int = 50; -pub const EL2HLT: ::c_int = 51; -pub const EBADE: ::c_int = 52; -pub const EBADR: ::c_int = 53; -pub const EXFULL: ::c_int = 54; -pub const ENOANO: ::c_int = 55; -pub const EBADRQC: ::c_int = 56; -pub const EBADSLT: ::c_int = 57; -pub const EDEADLOCK: ::c_int = EDEADLK; -pub const EMULTIHOP: ::c_int = 72; -pub const EBADMSG: ::c_int = 74; -pub const EOVERFLOW: ::c_int = 75; -pub const ENOTUNIQ: ::c_int = 76; -pub const EBADFD: ::c_int = 77; -pub const EREMCHG: ::c_int = 78; -pub const ELIBACC: ::c_int = 79; -pub const ELIBBAD: ::c_int = 80; -pub const ELIBSCN: ::c_int = 81; -pub const ELIBMAX: ::c_int = 82; -pub const ELIBEXEC: ::c_int = 83; -pub const EILSEQ: ::c_int = 84; -pub const ERESTART: ::c_int = 85; -pub const ESTRPIPE: ::c_int = 86; -pub const EUSERS: ::c_int = 87; -pub const ENOTSOCK: ::c_int = 88; -pub const EDESTADDRREQ: ::c_int = 89; -pub const EMSGSIZE: ::c_int = 90; -pub const EPROTOTYPE: ::c_int = 91; -pub const ENOPROTOOPT: ::c_int = 92; -pub const EPROTONOSUPPORT: ::c_int = 93; -pub 
const ESOCKTNOSUPPORT: ::c_int = 94; -pub const EOPNOTSUPP: ::c_int = 95; -pub const ENOTSUP: ::c_int = EOPNOTSUPP; -pub const EPFNOSUPPORT: ::c_int = 96; -pub const EAFNOSUPPORT: ::c_int = 97; -pub const EADDRINUSE: ::c_int = 98; -pub const EADDRNOTAVAIL: ::c_int = 99; -pub const ENETDOWN: ::c_int = 100; -pub const ENETUNREACH: ::c_int = 101; -pub const ENETRESET: ::c_int = 102; -pub const ECONNABORTED: ::c_int = 103; -pub const ECONNRESET: ::c_int = 104; -pub const ENOBUFS: ::c_int = 105; -pub const EISCONN: ::c_int = 106; -pub const ENOTCONN: ::c_int = 107; -pub const ESHUTDOWN: ::c_int = 108; -pub const ETOOMANYREFS: ::c_int = 109; -pub const ETIMEDOUT: ::c_int = 110; -pub const ECONNREFUSED: ::c_int = 111; -pub const EHOSTDOWN: ::c_int = 112; -pub const EHOSTUNREACH: ::c_int = 113; -pub const EALREADY: ::c_int = 114; -pub const EINPROGRESS: ::c_int = 115; -pub const ESTALE: ::c_int = 116; -pub const EUCLEAN: ::c_int = 117; -pub const ENOTNAM: ::c_int = 118; -pub const ENAVAIL: ::c_int = 119; -pub const EISNAM: ::c_int = 120; -pub const EREMOTEIO: ::c_int = 121; -pub const EDQUOT: ::c_int = 122; -pub const ENOMEDIUM: ::c_int = 123; -pub const EMEDIUMTYPE: ::c_int = 124; -pub const ECANCELED: ::c_int = 125; -pub const ENOKEY: ::c_int = 126; -pub const EKEYEXPIRED: ::c_int = 127; -pub const EKEYREVOKED: ::c_int = 128; -pub const EKEYREJECTED: ::c_int = 129; -pub const EOWNERDEAD: ::c_int = 130; -pub const ENOTRECOVERABLE: ::c_int = 131; -pub const ERFKILL: ::c_int = 132; -pub const EHWPOISON: ::c_int = 133; - -pub const SO_REUSEADDR: ::c_int = 2; -pub const SO_TYPE: ::c_int = 3; -pub const SO_ERROR: ::c_int = 4; -pub const SO_DONTROUTE: ::c_int = 5; -pub const SO_BROADCAST: ::c_int = 6; -pub const SO_SNDBUF: ::c_int = 7; -pub const SO_RCVBUF: ::c_int = 8; -pub const SO_KEEPALIVE: ::c_int = 9; -pub const SO_OOBINLINE: ::c_int = 10; -pub const SO_LINGER: ::c_int = 13; -pub const SO_REUSEPORT: ::c_int = 15; -pub const SO_RCVLOWAT: ::c_int = 18; -pub const 
SO_SNDLOWAT: ::c_int = 19; -pub const SO_RCVTIMEO: ::c_int = 20; -pub const SO_SNDTIMEO: ::c_int = 21; -pub const SO_ACCEPTCONN: ::c_int = 30; - -pub const SA_ONSTACK: ::c_int = 0x08000000; -pub const SA_SIGINFO: ::c_int = 0x00000004; -pub const SA_NOCLDWAIT: ::c_int = 0x00000002; - -pub const SIGCHLD: ::c_int = 17; -pub const SIGBUS: ::c_int = 7; -pub const SIGTTIN: ::c_int = 21; -pub const SIGTTOU: ::c_int = 22; -pub const SIGXCPU: ::c_int = 24; -pub const SIGXFSZ: ::c_int = 25; -pub const SIGVTALRM: ::c_int = 26; -pub const SIGPROF: ::c_int = 27; -pub const SIGWINCH: ::c_int = 28; -pub const SIGUSR1: ::c_int = 10; -pub const SIGUSR2: ::c_int = 12; -pub const SIGCONT: ::c_int = 18; -pub const SIGSTOP: ::c_int = 19; -pub const SIGTSTP: ::c_int = 20; -pub const SIGURG: ::c_int = 23; -pub const SIGIO: ::c_int = 29; -pub const SIGSYS: ::c_int = 31; -pub const SIGSTKFLT: ::c_int = 16; -pub const SIGPOLL: ::c_int = 29; -pub const SIGPWR: ::c_int = 30; -pub const SIG_SETMASK: ::c_int = 2; -pub const SIG_BLOCK: ::c_int = 0x000000; -pub const SIG_UNBLOCK: ::c_int = 0x01; - -pub const EXTPROC: ::tcflag_t = 0x00010000; - -pub const MAP_HUGETLB: ::c_int = 0x040000; - -pub const F_GETLK: ::c_int = 12; -pub const F_GETOWN: ::c_int = 9; -pub const F_SETLK: ::c_int = 13; -pub const F_SETLKW: ::c_int = 14; -pub const F_SETOWN: ::c_int = 8; - -pub const VEOF: usize = 4; -pub const VEOL: usize = 11; -pub const VEOL2: usize = 16; -pub const VMIN: usize = 6; -pub const IEXTEN: ::tcflag_t = 0x00008000; -pub const TOSTOP: ::tcflag_t = 0x00000100; -pub const FLUSHO: ::tcflag_t = 0x00001000; - -pub const TCGETS: ::c_int = 0x5401; -pub const TCSETS: ::c_int = 0x5402; -pub const TCSETSW: ::c_int = 0x5403; -pub const TCSETSF: ::c_int = 0x5404; -pub const TCGETA: ::c_int = 0x5405; -pub const TCSETA: ::c_int = 0x5406; -pub const TCSETAW: ::c_int = 0x5407; -pub const TCSETAF: ::c_int = 0x5408; -pub const TCSBRK: ::c_int = 0x5409; -pub const TCXONC: ::c_int = 0x540A; -pub const TCFLSH: ::c_int 
= 0x540B; -pub const TIOCGSOFTCAR: ::c_int = 0x5419; -pub const TIOCSSOFTCAR: ::c_int = 0x541A; -pub const TIOCLINUX: ::c_int = 0x541C; -pub const TIOCGSERIAL: ::c_int = 0x541E; -pub const TIOCEXCL: ::c_int = 0x540C; -pub const TIOCNXCL: ::c_int = 0x540D; -pub const TIOCSCTTY: ::c_int = 0x540E; -pub const TIOCGPGRP: ::c_int = 0x540F; -pub const TIOCSPGRP: ::c_int = 0x5410; -pub const TIOCOUTQ: ::c_int = 0x5411; -pub const TIOCSTI: ::c_int = 0x5412; -pub const TIOCGWINSZ: ::c_int = 0x5413; -pub const TIOCSWINSZ: ::c_int = 0x5414; -pub const TIOCMGET: ::c_int = 0x5415; -pub const TIOCMBIS: ::c_int = 0x5416; -pub const TIOCMBIC: ::c_int = 0x5417; -pub const TIOCMSET: ::c_int = 0x5418; -pub const FIONREAD: ::c_int = 0x541B; -pub const TIOCCONS: ::c_int = 0x541D; - -pub const SYS_gettid: ::c_long = 224; // Valid for arm (32-bit) and x86 (32-bit) - -pub const POLLWRNORM: ::c_short = 0x100; -pub const POLLWRBAND: ::c_short = 0x200; diff -Nru cargo-0.17.0/vendor/libc-0.2.18/src/unix/notbsd/linux/musl/b32/mips.rs cargo-0.19.0/vendor/libc-0.2.18/src/unix/notbsd/linux/musl/b32/mips.rs --- cargo-0.17.0/vendor/libc-0.2.18/src/unix/notbsd/linux/musl/b32/mips.rs 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/libc-0.2.18/src/unix/notbsd/linux/musl/b32/mips.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,345 +0,0 @@ -pub type c_char = i8; -pub type wchar_t = ::c_int; - -s! 
{ - pub struct stat { - pub st_dev: ::dev_t, - __st_padding1: [::c_long; 2], - pub st_ino: ::ino_t, - pub st_mode: ::mode_t, - pub st_nlink: ::nlink_t, - pub st_uid: ::uid_t, - pub st_gid: ::gid_t, - pub st_rdev: ::dev_t, - __st_padding2: [::c_long; 2], - pub st_size: ::off_t, - pub st_atime: ::time_t, - pub st_atime_nsec: ::c_long, - pub st_mtime: ::time_t, - pub st_mtime_nsec: ::c_long, - pub st_ctime: ::time_t, - pub st_ctime_nsec: ::c_long, - pub st_blksize: ::blksize_t, - __st_padding3: ::c_long, - pub st_blocks: ::blkcnt_t, - __st_padding4: [::c_long; 14], - } - - pub struct stat64 { - pub st_dev: ::dev_t, - __st_padding1: [::c_long; 2], - pub st_ino: ::ino64_t, - pub st_mode: ::mode_t, - pub st_nlink: ::nlink_t, - pub st_uid: ::uid_t, - pub st_gid: ::gid_t, - pub st_rdev: ::dev_t, - __st_padding2: [::c_long; 2], - pub st_size: ::off_t, - pub st_atime: ::time_t, - pub st_atime_nsec: ::c_long, - pub st_mtime: ::time_t, - pub st_mtime_nsec: ::c_long, - pub st_ctime: ::time_t, - pub st_ctime_nsec: ::c_long, - pub st_blksize: ::blksize_t, - __st_padding3: ::c_long, - pub st_blocks: ::blkcnt64_t, - __st_padding4: [::c_long; 14], - } - - pub struct stack_t { - pub ss_sp: *mut ::c_void, - pub ss_size: ::size_t, - pub ss_flags: ::c_int, - } - - pub struct shmid_ds { - pub shm_perm: ::ipc_perm, - pub shm_segsz: ::size_t, - pub shm_atime: ::time_t, - pub shm_dtime: ::time_t, - pub shm_ctime: ::time_t, - pub shm_cpid: ::pid_t, - pub shm_lpid: ::pid_t, - pub shm_nattch: ::c_ulong, - __pad1: ::c_ulong, - __pad2: ::c_ulong, - } - - pub struct msqid_ds { - pub msg_perm: ::ipc_perm, - #[cfg(target_endian = "big")] - __unused1: ::c_int, - pub msg_stime: ::time_t, - #[cfg(target_endian = "little")] - __unused1: ::c_int, - #[cfg(target_endian = "big")] - __unused2: ::c_int, - pub msg_rtime: ::time_t, - #[cfg(target_endian = "little")] - __unused2: ::c_int, - #[cfg(target_endian = "big")] - __unused3: ::c_int, - pub msg_ctime: ::time_t, - #[cfg(target_endian = "little")] - 
__unused3: ::c_int, - __msg_cbytes: ::c_ulong, - pub msg_qnum: ::msgqnum_t, - pub msg_qbytes: ::msglen_t, - pub msg_lspid: ::pid_t, - pub msg_lrpid: ::pid_t, - __pad1: ::c_ulong, - __pad2: ::c_ulong, - } - - pub struct statfs { - pub f_type: ::c_ulong, - pub f_bsize: ::c_ulong, - pub f_frsize: ::c_ulong, - pub f_blocks: ::fsblkcnt_t, - pub f_bfree: ::fsblkcnt_t, - pub f_files: ::fsfilcnt_t, - pub f_ffree: ::fsfilcnt_t, - pub f_bavail: ::fsblkcnt_t, - pub f_fsid: ::fsid_t, - pub f_namelen: ::c_ulong, - pub f_flags: ::c_ulong, - pub f_spare: [::c_ulong; 5], - } - - pub struct siginfo_t { - pub si_signo: ::c_int, - pub si_code: ::c_int, - pub si_errno: ::c_int, - pub _pad: [::c_int; 29], - _align: [usize; 0], - } -} - -pub const O_DIRECT: ::c_int = 0o100000; -pub const O_DIRECTORY: ::c_int = 0o200000; -pub const O_NOFOLLOW: ::c_int = 0o400000; -pub const O_ASYNC: ::c_int = 0o10000; - -pub const FIOCLEX: ::c_int = 0x6601; -pub const FIONBIO: ::c_int = 0x667E; - -pub const RLIMIT_RSS: ::c_int = 7; -pub const RLIMIT_NOFILE: ::c_int = 5; -pub const RLIMIT_AS: ::c_int = 6; -pub const RLIMIT_NPROC: ::c_int = 8; -pub const RLIMIT_MEMLOCK: ::c_int = 9; - -pub const O_APPEND: ::c_int = 0o010; -pub const O_CREAT: ::c_int = 0o400; -pub const O_EXCL: ::c_int = 0o2000; -pub const O_NOCTTY: ::c_int = 0o4000; -pub const O_NONBLOCK: ::c_int = 0o200; -pub const O_SYNC: ::c_int = 0o40020; -pub const O_RSYNC: ::c_int = 0o40020; -pub const O_DSYNC: ::c_int = 0o020; - -pub const SOCK_NONBLOCK: ::c_int = 0o200; - -pub const MAP_ANON: ::c_int = 0x800; -pub const MAP_GROWSDOWN: ::c_int = 0x1000; -pub const MAP_DENYWRITE: ::c_int = 0x2000; -pub const MAP_EXECUTABLE: ::c_int = 0x4000; -pub const MAP_LOCKED: ::c_int = 0x8000; -pub const MAP_NORESERVE: ::c_int = 0x0400; -pub const MAP_POPULATE: ::c_int = 0x10000; -pub const MAP_NONBLOCK: ::c_int = 0x20000; -pub const MAP_STACK: ::c_int = 0x40000; - -pub const EDEADLK: ::c_int = 45; -pub const ENAMETOOLONG: ::c_int = 78; -pub const ENOLCK: 
::c_int = 46; -pub const ENOSYS: ::c_int = 89; -pub const ENOTEMPTY: ::c_int = 93; -pub const ELOOP: ::c_int = 90; -pub const ENOMSG: ::c_int = 35; -pub const EIDRM: ::c_int = 36; -pub const ECHRNG: ::c_int = 37; -pub const EL2NSYNC: ::c_int = 38; -pub const EL3HLT: ::c_int = 39; -pub const EL3RST: ::c_int = 40; -pub const ELNRNG: ::c_int = 41; -pub const EUNATCH: ::c_int = 42; -pub const ENOCSI: ::c_int = 43; -pub const EL2HLT: ::c_int = 44; -pub const EBADE: ::c_int = 50; -pub const EBADR: ::c_int = 51; -pub const EXFULL: ::c_int = 52; -pub const ENOANO: ::c_int = 53; -pub const EBADRQC: ::c_int = 54; -pub const EBADSLT: ::c_int = 55; -pub const EDEADLOCK: ::c_int = 56; -pub const EMULTIHOP: ::c_int = 74; -pub const EOVERFLOW: ::c_int = 79; -pub const ENOTUNIQ: ::c_int = 80; -pub const EBADFD: ::c_int = 81; -pub const EBADMSG: ::c_int = 77; -pub const EREMCHG: ::c_int = 82; -pub const ELIBACC: ::c_int = 83; -pub const ELIBBAD: ::c_int = 84; -pub const ELIBSCN: ::c_int = 85; -pub const ELIBMAX: ::c_int = 86; -pub const ELIBEXEC: ::c_int = 87; -pub const EILSEQ: ::c_int = 88; -pub const ERESTART: ::c_int = 91; -pub const ESTRPIPE: ::c_int = 92; -pub const EUSERS: ::c_int = 94; -pub const ENOTSOCK: ::c_int = 95; -pub const EDESTADDRREQ: ::c_int = 96; -pub const EMSGSIZE: ::c_int = 97; -pub const EPROTOTYPE: ::c_int = 98; -pub const ENOPROTOOPT: ::c_int = 99; -pub const EPROTONOSUPPORT: ::c_int = 120; -pub const ESOCKTNOSUPPORT: ::c_int = 121; -pub const EOPNOTSUPP: ::c_int = 122; -pub const ENOTSUP: ::c_int = EOPNOTSUPP; -pub const EPFNOSUPPORT: ::c_int = 123; -pub const EAFNOSUPPORT: ::c_int = 124; -pub const EADDRINUSE: ::c_int = 125; -pub const EADDRNOTAVAIL: ::c_int = 126; -pub const ENETDOWN: ::c_int = 127; -pub const ENETUNREACH: ::c_int = 128; -pub const ENETRESET: ::c_int = 129; -pub const ECONNABORTED: ::c_int = 130; -pub const ECONNRESET: ::c_int = 131; -pub const ENOBUFS: ::c_int = 132; -pub const EISCONN: ::c_int = 133; -pub const ENOTCONN: ::c_int = 
134; -pub const ESHUTDOWN: ::c_int = 143; -pub const ETOOMANYREFS: ::c_int = 144; -pub const ETIMEDOUT: ::c_int = 145; -pub const ECONNREFUSED: ::c_int = 146; -pub const EHOSTDOWN: ::c_int = 147; -pub const EHOSTUNREACH: ::c_int = 148; -pub const EALREADY: ::c_int = 149; -pub const EINPROGRESS: ::c_int = 150; -pub const ESTALE: ::c_int = 151; -pub const EUCLEAN: ::c_int = 135; -pub const ENOTNAM: ::c_int = 137; -pub const ENAVAIL: ::c_int = 138; -pub const EISNAM: ::c_int = 139; -pub const EREMOTEIO: ::c_int = 140; -pub const EDQUOT: ::c_int = 1133; -pub const ENOMEDIUM: ::c_int = 159; -pub const EMEDIUMTYPE: ::c_int = 160; -pub const ECANCELED: ::c_int = 158; -pub const ENOKEY: ::c_int = 161; -pub const EKEYEXPIRED: ::c_int = 162; -pub const EKEYREVOKED: ::c_int = 163; -pub const EKEYREJECTED: ::c_int = 164; -pub const EOWNERDEAD: ::c_int = 165; -pub const ENOTRECOVERABLE: ::c_int = 166; -pub const EHWPOISON: ::c_int = 168; -pub const ERFKILL: ::c_int = 167; - -pub const SOCK_STREAM: ::c_int = 2; -pub const SOCK_DGRAM: ::c_int = 1; -pub const SOCK_SEQPACKET: ::c_int = 5; - -pub const SOL_SOCKET: ::c_int = 65535; - -pub const SO_REUSEADDR: ::c_int = 0x0004; -pub const SO_TYPE: ::c_int = 0x1008; -pub const SO_ERROR: ::c_int = 0x1007; -pub const SO_DONTROUTE: ::c_int = 0x0010; -pub const SO_BROADCAST: ::c_int = 0x0020; -pub const SO_SNDBUF: ::c_int = 0x1001; -pub const SO_RCVBUF: ::c_int = 0x1002; -pub const SO_KEEPALIVE: ::c_int = 0x0008; -pub const SO_OOBINLINE: ::c_int = 0x0100; -pub const SO_LINGER: ::c_int = 0x0080; -pub const SO_REUSEPORT: ::c_int = 0x200; -pub const SO_RCVLOWAT: ::c_int = 0x1004; -pub const SO_SNDLOWAT: ::c_int = 0x1003; -pub const SO_RCVTIMEO: ::c_int = 0x1006; -pub const SO_SNDTIMEO: ::c_int = 0x1005; -pub const SO_ACCEPTCONN: ::c_int = 0x1009; - -pub const SA_ONSTACK: ::c_int = 0x08000000; -pub const SA_SIGINFO: ::c_int = 8; -pub const SA_NOCLDWAIT: ::c_int = 0x10000; - -pub const SIGCHLD: ::c_int = 18; -pub const SIGBUS: ::c_int = 10; -pub 
const SIGTTIN: ::c_int = 26; -pub const SIGTTOU: ::c_int = 27; -pub const SIGXCPU: ::c_int = 30; -pub const SIGXFSZ: ::c_int = 31; -pub const SIGVTALRM: ::c_int = 28; -pub const SIGPROF: ::c_int = 29; -pub const SIGWINCH: ::c_int = 20; -pub const SIGUSR1: ::c_int = 16; -pub const SIGUSR2: ::c_int = 17; -pub const SIGCONT: ::c_int = 25; -pub const SIGSTOP: ::c_int = 23; -pub const SIGTSTP: ::c_int = 24; -pub const SIGURG: ::c_int = 21; -pub const SIGIO: ::c_int = 22; -pub const SIGSYS: ::c_int = 12; -pub const SIGSTKFLT: ::c_int = 7; -pub const SIGPOLL: ::c_int = ::SIGIO; -pub const SIGPWR: ::c_int = 19; -pub const SIG_SETMASK: ::c_int = 3; -pub const SIG_BLOCK: ::c_int = 1; -pub const SIG_UNBLOCK: ::c_int = 2; - -pub const EXTPROC: ::tcflag_t = 0o200000; - -pub const MAP_HUGETLB: ::c_int = 0x80000; - -pub const F_GETLK: ::c_int = 33; -pub const F_GETOWN: ::c_int = 23; -pub const F_SETLK: ::c_int = 34; -pub const F_SETLKW: ::c_int = 35; -pub const F_SETOWN: ::c_int = 24; - -pub const VEOF: usize = 16; -pub const VEOL: usize = 17; -pub const VEOL2: usize = 6; -pub const VMIN: usize = 4; -pub const IEXTEN: ::tcflag_t = 0o000400; -pub const TOSTOP: ::tcflag_t = 0o100000; -pub const FLUSHO: ::tcflag_t = 0o020000; - -pub const TCGETS: ::c_int = 0x540D; -pub const TCSETS: ::c_int = 0x540E; -pub const TCSETSW: ::c_int = 0x540F; -pub const TCSETSF: ::c_int = 0x5410; -pub const TCGETA: ::c_int = 0x5401; -pub const TCSETA: ::c_int = 0x5402; -pub const TCSETAW: ::c_int = 0x5403; -pub const TCSETAF: ::c_int = 0x5404; -pub const TCSBRK: ::c_int = 0x5405; -pub const TCXONC: ::c_int = 0x5406; -pub const TCFLSH: ::c_int = 0x5407; -pub const TIOCGSOFTCAR: ::c_int = 0x5481; -pub const TIOCSSOFTCAR: ::c_int = 0x5482; -pub const TIOCLINUX: ::c_int = 0x5483; -pub const TIOCGSERIAL: ::c_int = 0x5484; -pub const TIOCEXCL: ::c_int = 0x740D; -pub const TIOCNXCL: ::c_int = 0x740E; -pub const TIOCSCTTY: ::c_int = 0x5480; -pub const TIOCGPGRP: ::c_int = 0x40047477; -pub const TIOCSPGRP: 
::c_int = 0x80047476; -pub const TIOCOUTQ: ::c_int = 0x7472; -pub const TIOCSTI: ::c_int = 0x5472; -pub const TIOCGWINSZ: ::c_int = 0x40087468; -pub const TIOCSWINSZ: ::c_int = 0x80087467; -pub const TIOCMGET: ::c_int = 0x741D; -pub const TIOCMBIS: ::c_int = 0x741B; -pub const TIOCMBIC: ::c_int = 0x741C; -pub const TIOCMSET: ::c_int = 0x741A; -pub const FIONREAD: ::c_int = 0x467F; -pub const TIOCCONS: ::c_int = 0x80047478; - -pub const SYS_gettid: ::c_long = 4222; // Valid for O32 -pub const SYS_perf_event_open: ::c_long = 4333; // Valid for O32 - -pub const POLLWRNORM: ::c_short = 0x4; -pub const POLLWRBAND: ::c_short = 0x100; diff -Nru cargo-0.17.0/vendor/libc-0.2.18/src/unix/notbsd/linux/musl/b32/mod.rs cargo-0.19.0/vendor/libc-0.2.18/src/unix/notbsd/linux/musl/b32/mod.rs --- cargo-0.17.0/vendor/libc-0.2.18/src/unix/notbsd/linux/musl/b32/mod.rs 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/libc-0.2.18/src/unix/notbsd/linux/musl/b32/mod.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,50 +0,0 @@ -pub type c_long = i32; -pub type c_ulong = u32; -pub type nlink_t = u32; - -s! { - pub struct pthread_attr_t { - __size: [u32; 9] - } - - pub struct sigset_t { - __val: [::c_ulong; 32], - } - - pub struct msghdr { - pub msg_name: *mut ::c_void, - pub msg_namelen: ::socklen_t, - pub msg_iov: *mut ::iovec, - pub msg_iovlen: ::c_int, - pub msg_control: *mut ::c_void, - pub msg_controllen: ::socklen_t, - pub msg_flags: ::c_int, - } - - pub struct sem_t { - __val: [::c_int; 4], - } -} - -pub const __SIZEOF_PTHREAD_RWLOCK_T: usize = 32; -pub const __SIZEOF_PTHREAD_MUTEX_T: usize = 24; - -cfg_if! 
{ - if #[cfg(any(target_arch = "x86"))] { - mod x86; - pub use self::x86::*; - } else if #[cfg(any(target_arch = "mips"))] { - mod mips; - pub use self::mips::*; - } else if #[cfg(any(target_arch = "arm"))] { - mod arm; - pub use self::arm::*; - } else if #[cfg(any(target_arch = "asmjs", target_arch = "wasm32"))] { - // For the time being asmjs and wasm32 are the same, and both - // backed by identical emscripten runtimes - mod asmjs; - pub use self::asmjs::*; - } else { - // Unknown target_arch - } -} diff -Nru cargo-0.17.0/vendor/libc-0.2.18/src/unix/notbsd/linux/musl/b32/x86.rs cargo-0.19.0/vendor/libc-0.2.18/src/unix/notbsd/linux/musl/b32/x86.rs --- cargo-0.17.0/vendor/libc-0.2.18/src/unix/notbsd/linux/musl/b32/x86.rs 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/libc-0.2.18/src/unix/notbsd/linux/musl/b32/x86.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,351 +0,0 @@ -pub type c_char = i8; -pub type wchar_t = i32; - -s! { - pub struct stat { - pub st_dev: ::dev_t, - __st_dev_padding: ::c_int, - __st_ino_truncated: ::c_long, - pub st_mode: ::mode_t, - pub st_nlink: ::nlink_t, - pub st_uid: ::uid_t, - pub st_gid: ::gid_t, - pub st_rdev: ::dev_t, - __st_rdev_padding: ::c_int, - pub st_size: ::off_t, - pub st_blksize: ::blksize_t, - pub st_blocks: ::blkcnt_t, - pub st_atime: ::time_t, - pub st_atime_nsec: ::c_long, - pub st_mtime: ::time_t, - pub st_mtime_nsec: ::c_long, - pub st_ctime: ::time_t, - pub st_ctime_nsec: ::c_long, - pub st_ino: ::ino_t, - } - - pub struct stat64 { - pub st_dev: ::dev_t, - __st_dev_padding: ::c_int, - __st_ino_truncated: ::c_long, - pub st_mode: ::mode_t, - pub st_nlink: ::nlink_t, - pub st_uid: ::uid_t, - pub st_gid: ::gid_t, - pub st_rdev: ::dev_t, - __st_rdev_padding: ::c_int, - pub st_size: ::off_t, - pub st_blksize: ::blksize_t, - pub st_blocks: ::blkcnt_t, - pub st_atime: ::time_t, - pub st_atime_nsec: ::c_long, - pub st_mtime: ::time_t, - pub st_mtime_nsec: ::c_long, - pub st_ctime: ::time_t, - pub st_ctime_nsec: 
::c_long, - pub st_ino: ::ino_t, - } - - pub struct stack_t { - pub ss_sp: *mut ::c_void, - pub ss_flags: ::c_int, - pub ss_size: ::size_t - } - - pub struct shmid_ds { - pub shm_perm: ::ipc_perm, - pub shm_segsz: ::size_t, - pub shm_atime: ::time_t, - __unused1: ::c_int, - pub shm_dtime: ::time_t, - __unused2: ::c_int, - pub shm_ctime: ::time_t, - __unused3: ::c_int, - pub shm_cpid: ::pid_t, - pub shm_lpid: ::pid_t, - pub shm_nattch: ::c_ulong, - __pad1: ::c_ulong, - __pad2: ::c_ulong, - } - - pub struct msqid_ds { - pub msg_perm: ::ipc_perm, - pub msg_stime: ::time_t, - __unused1: ::c_int, - pub msg_rtime: ::time_t, - __unused2: ::c_int, - pub msg_ctime: ::time_t, - __unused3: ::c_int, - __msg_cbytes: ::c_ulong, - pub msg_qnum: ::msgqnum_t, - pub msg_qbytes: ::msglen_t, - pub msg_lspid: ::pid_t, - pub msg_lrpid: ::pid_t, - __pad1: ::c_ulong, - __pad2: ::c_ulong, - } - - pub struct statfs { - pub f_type: ::c_ulong, - pub f_bsize: ::c_ulong, - pub f_blocks: ::fsblkcnt_t, - pub f_bfree: ::fsblkcnt_t, - pub f_bavail: ::fsblkcnt_t, - pub f_files: ::fsfilcnt_t, - pub f_ffree: ::fsfilcnt_t, - pub f_fsid: ::fsid_t, - pub f_namelen: ::c_ulong, - pub f_frsize: ::c_ulong, - pub f_flags: ::c_ulong, - pub f_spare: [::c_ulong; 4], - } - - pub struct mcontext_t { - __private: [u32; 22] - } - - pub struct ucontext_t { - pub uc_flags: ::c_ulong, - pub uc_link: *mut ucontext_t, - pub uc_stack: ::stack_t, - pub uc_mcontext: mcontext_t, - pub uc_sigmask: ::sigset_t, - __private: [u8; 112], - } - - pub struct siginfo_t { - pub si_signo: ::c_int, - pub si_errno: ::c_int, - pub si_code: ::c_int, - pub _pad: [::c_int; 29], - _align: [usize; 0], - } -} - -pub const O_DIRECT: ::c_int = 0x4000; -pub const O_DIRECTORY: ::c_int = 0x10000; -pub const O_NOFOLLOW: ::c_int = 0x20000; -pub const O_ASYNC: ::c_int = 0x2000; - -pub const FIOCLEX: ::c_int = 0x5451; -pub const FIONBIO: ::c_int = 0x5421; - -pub const RLIMIT_RSS: ::c_int = 5; -pub const RLIMIT_NOFILE: ::c_int = 7; -pub const RLIMIT_AS: 
::c_int = 9; -pub const RLIMIT_NPROC: ::c_int = 6; -pub const RLIMIT_MEMLOCK: ::c_int = 8; - -pub const O_APPEND: ::c_int = 1024; -pub const O_CREAT: ::c_int = 64; -pub const O_EXCL: ::c_int = 128; -pub const O_NOCTTY: ::c_int = 256; -pub const O_NONBLOCK: ::c_int = 2048; -pub const O_SYNC: ::c_int = 1052672; -pub const O_RSYNC: ::c_int = 1052672; -pub const O_DSYNC: ::c_int = 4096; - -pub const SOCK_NONBLOCK: ::c_int = 2048; - -pub const MAP_ANON: ::c_int = 0x0020; -pub const MAP_GROWSDOWN: ::c_int = 0x0100; -pub const MAP_DENYWRITE: ::c_int = 0x0800; -pub const MAP_EXECUTABLE: ::c_int = 0x01000; -pub const MAP_LOCKED: ::c_int = 0x02000; -pub const MAP_NORESERVE: ::c_int = 0x04000; -pub const MAP_POPULATE: ::c_int = 0x08000; -pub const MAP_NONBLOCK: ::c_int = 0x010000; -pub const MAP_STACK: ::c_int = 0x020000; - -pub const SOCK_STREAM: ::c_int = 1; -pub const SOCK_DGRAM: ::c_int = 2; -pub const SOCK_SEQPACKET: ::c_int = 5; - -pub const SOL_SOCKET: ::c_int = 1; - -pub const EDEADLK: ::c_int = 35; -pub const ENAMETOOLONG: ::c_int = 36; -pub const ENOLCK: ::c_int = 37; -pub const ENOSYS: ::c_int = 38; -pub const ENOTEMPTY: ::c_int = 39; -pub const ELOOP: ::c_int = 40; -pub const ENOMSG: ::c_int = 42; -pub const EIDRM: ::c_int = 43; -pub const ECHRNG: ::c_int = 44; -pub const EL2NSYNC: ::c_int = 45; -pub const EL3HLT: ::c_int = 46; -pub const EL3RST: ::c_int = 47; -pub const ELNRNG: ::c_int = 48; -pub const EUNATCH: ::c_int = 49; -pub const ENOCSI: ::c_int = 50; -pub const EL2HLT: ::c_int = 51; -pub const EBADE: ::c_int = 52; -pub const EBADR: ::c_int = 53; -pub const EXFULL: ::c_int = 54; -pub const ENOANO: ::c_int = 55; -pub const EBADRQC: ::c_int = 56; -pub const EBADSLT: ::c_int = 57; -pub const EDEADLOCK: ::c_int = EDEADLK; -pub const EMULTIHOP: ::c_int = 72; -pub const EBADMSG: ::c_int = 74; -pub const EOVERFLOW: ::c_int = 75; -pub const ENOTUNIQ: ::c_int = 76; -pub const EBADFD: ::c_int = 77; -pub const EREMCHG: ::c_int = 78; -pub const ELIBACC: ::c_int = 79; 
-pub const ELIBBAD: ::c_int = 80; -pub const ELIBSCN: ::c_int = 81; -pub const ELIBMAX: ::c_int = 82; -pub const ELIBEXEC: ::c_int = 83; -pub const EILSEQ: ::c_int = 84; -pub const ERESTART: ::c_int = 85; -pub const ESTRPIPE: ::c_int = 86; -pub const EUSERS: ::c_int = 87; -pub const ENOTSOCK: ::c_int = 88; -pub const EDESTADDRREQ: ::c_int = 89; -pub const EMSGSIZE: ::c_int = 90; -pub const EPROTOTYPE: ::c_int = 91; -pub const ENOPROTOOPT: ::c_int = 92; -pub const EPROTONOSUPPORT: ::c_int = 93; -pub const ESOCKTNOSUPPORT: ::c_int = 94; -pub const EOPNOTSUPP: ::c_int = 95; -pub const ENOTSUP: ::c_int = EOPNOTSUPP; -pub const EPFNOSUPPORT: ::c_int = 96; -pub const EAFNOSUPPORT: ::c_int = 97; -pub const EADDRINUSE: ::c_int = 98; -pub const EADDRNOTAVAIL: ::c_int = 99; -pub const ENETDOWN: ::c_int = 100; -pub const ENETUNREACH: ::c_int = 101; -pub const ENETRESET: ::c_int = 102; -pub const ECONNABORTED: ::c_int = 103; -pub const ECONNRESET: ::c_int = 104; -pub const ENOBUFS: ::c_int = 105; -pub const EISCONN: ::c_int = 106; -pub const ENOTCONN: ::c_int = 107; -pub const ESHUTDOWN: ::c_int = 108; -pub const ETOOMANYREFS: ::c_int = 109; -pub const ETIMEDOUT: ::c_int = 110; -pub const ECONNREFUSED: ::c_int = 111; -pub const EHOSTDOWN: ::c_int = 112; -pub const EHOSTUNREACH: ::c_int = 113; -pub const EALREADY: ::c_int = 114; -pub const EINPROGRESS: ::c_int = 115; -pub const ESTALE: ::c_int = 116; -pub const EUCLEAN: ::c_int = 117; -pub const ENOTNAM: ::c_int = 118; -pub const ENAVAIL: ::c_int = 119; -pub const EISNAM: ::c_int = 120; -pub const EREMOTEIO: ::c_int = 121; -pub const EDQUOT: ::c_int = 122; -pub const ENOMEDIUM: ::c_int = 123; -pub const EMEDIUMTYPE: ::c_int = 124; -pub const ECANCELED: ::c_int = 125; -pub const ENOKEY: ::c_int = 126; -pub const EKEYEXPIRED: ::c_int = 127; -pub const EKEYREVOKED: ::c_int = 128; -pub const EKEYREJECTED: ::c_int = 129; -pub const EOWNERDEAD: ::c_int = 130; -pub const ENOTRECOVERABLE: ::c_int = 131; -pub const ERFKILL: ::c_int = 
132; -pub const EHWPOISON: ::c_int = 133; - -pub const SO_REUSEADDR: ::c_int = 2; -pub const SO_TYPE: ::c_int = 3; -pub const SO_ERROR: ::c_int = 4; -pub const SO_DONTROUTE: ::c_int = 5; -pub const SO_BROADCAST: ::c_int = 6; -pub const SO_SNDBUF: ::c_int = 7; -pub const SO_RCVBUF: ::c_int = 8; -pub const SO_KEEPALIVE: ::c_int = 9; -pub const SO_OOBINLINE: ::c_int = 10; -pub const SO_LINGER: ::c_int = 13; -pub const SO_REUSEPORT: ::c_int = 15; -pub const SO_RCVLOWAT: ::c_int = 18; -pub const SO_SNDLOWAT: ::c_int = 19; -pub const SO_RCVTIMEO: ::c_int = 20; -pub const SO_SNDTIMEO: ::c_int = 21; -pub const SO_ACCEPTCONN: ::c_int = 30; - -pub const SA_ONSTACK: ::c_int = 0x08000000; -pub const SA_SIGINFO: ::c_int = 0x00000004; -pub const SA_NOCLDWAIT: ::c_int = 0x00000002; - -pub const SIGCHLD: ::c_int = 17; -pub const SIGBUS: ::c_int = 7; -pub const SIGTTIN: ::c_int = 21; -pub const SIGTTOU: ::c_int = 22; -pub const SIGXCPU: ::c_int = 24; -pub const SIGXFSZ: ::c_int = 25; -pub const SIGVTALRM: ::c_int = 26; -pub const SIGPROF: ::c_int = 27; -pub const SIGWINCH: ::c_int = 28; -pub const SIGUSR1: ::c_int = 10; -pub const SIGUSR2: ::c_int = 12; -pub const SIGCONT: ::c_int = 18; -pub const SIGSTOP: ::c_int = 19; -pub const SIGTSTP: ::c_int = 20; -pub const SIGURG: ::c_int = 23; -pub const SIGIO: ::c_int = 29; -pub const SIGSYS: ::c_int = 31; -pub const SIGSTKFLT: ::c_int = 16; -pub const SIGPOLL: ::c_int = 29; -pub const SIGPWR: ::c_int = 30; -pub const SIG_SETMASK: ::c_int = 2; -pub const SIG_BLOCK: ::c_int = 0x000000; -pub const SIG_UNBLOCK: ::c_int = 0x01; - -pub const EXTPROC: ::tcflag_t = 0x00010000; - -pub const MAP_HUGETLB: ::c_int = 0x040000; -pub const MAP_32BIT: ::c_int = 0x0040; - -pub const F_GETLK: ::c_int = 12; -pub const F_GETOWN: ::c_int = 9; -pub const F_SETLK: ::c_int = 13; -pub const F_SETLKW: ::c_int = 14; -pub const F_SETOWN: ::c_int = 8; - -pub const VEOF: usize = 4; -pub const VEOL: usize = 11; -pub const VEOL2: usize = 16; -pub const VMIN: usize = 6; 
-pub const IEXTEN: ::tcflag_t = 0x00008000; -pub const TOSTOP: ::tcflag_t = 0x00000100; -pub const FLUSHO: ::tcflag_t = 0x00001000; - -pub const TCGETS: ::c_int = 0x5401; -pub const TCSETS: ::c_int = 0x5402; -pub const TCSETSW: ::c_int = 0x5403; -pub const TCSETSF: ::c_int = 0x5404; -pub const TCGETA: ::c_int = 0x5405; -pub const TCSETA: ::c_int = 0x5406; -pub const TCSETAW: ::c_int = 0x5407; -pub const TCSETAF: ::c_int = 0x5408; -pub const TCSBRK: ::c_int = 0x5409; -pub const TCXONC: ::c_int = 0x540A; -pub const TCFLSH: ::c_int = 0x540B; -pub const TIOCGSOFTCAR: ::c_int = 0x5419; -pub const TIOCSSOFTCAR: ::c_int = 0x541A; -pub const TIOCLINUX: ::c_int = 0x541C; -pub const TIOCGSERIAL: ::c_int = 0x541E; -pub const TIOCEXCL: ::c_int = 0x540C; -pub const TIOCNXCL: ::c_int = 0x540D; -pub const TIOCSCTTY: ::c_int = 0x540E; -pub const TIOCGPGRP: ::c_int = 0x540F; -pub const TIOCSPGRP: ::c_int = 0x5410; -pub const TIOCOUTQ: ::c_int = 0x5411; -pub const TIOCSTI: ::c_int = 0x5412; -pub const TIOCGWINSZ: ::c_int = 0x5413; -pub const TIOCSWINSZ: ::c_int = 0x5414; -pub const TIOCMGET: ::c_int = 0x5415; -pub const TIOCMBIS: ::c_int = 0x5416; -pub const TIOCMBIC: ::c_int = 0x5417; -pub const TIOCMSET: ::c_int = 0x5418; -pub const FIONREAD: ::c_int = 0x541B; -pub const TIOCCONS: ::c_int = 0x541D; - -pub const SYS_gettid: ::c_long = 224; -pub const SYS_perf_event_open: ::c_long = 336; - -pub const POLLWRNORM: ::c_short = 0x100; -pub const POLLWRBAND: ::c_short = 0x200; diff -Nru cargo-0.17.0/vendor/libc-0.2.18/src/unix/notbsd/linux/musl/b64/aarch64.rs cargo-0.19.0/vendor/libc-0.2.18/src/unix/notbsd/linux/musl/b64/aarch64.rs --- cargo-0.17.0/vendor/libc-0.2.18/src/unix/notbsd/linux/musl/b64/aarch64.rs 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/libc-0.2.18/src/unix/notbsd/linux/musl/b64/aarch64.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,3 +0,0 @@ -pub type c_char = u8; - -pub const SYS_perf_event_open: ::c_long = 241; diff -Nru 
cargo-0.17.0/vendor/libc-0.2.18/src/unix/notbsd/linux/musl/b64/mod.rs cargo-0.19.0/vendor/libc-0.2.18/src/unix/notbsd/linux/musl/b64/mod.rs --- cargo-0.17.0/vendor/libc-0.2.18/src/unix/notbsd/linux/musl/b64/mod.rs 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/libc-0.2.18/src/unix/notbsd/linux/musl/b64/mod.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,371 +0,0 @@ -pub type wchar_t = i32; -pub type c_long = i64; -pub type c_ulong = u64; -pub type nlink_t = u64; - -s! { - pub struct stat { - pub st_dev: ::dev_t, - pub st_ino: ::ino_t, - pub st_nlink: ::nlink_t, - pub st_mode: ::mode_t, - pub st_uid: ::uid_t, - pub st_gid: ::gid_t, - __pad0: ::c_int, - pub st_rdev: ::dev_t, - pub st_size: ::off_t, - pub st_blksize: ::blksize_t, - pub st_blocks: ::blkcnt_t, - pub st_atime: ::time_t, - pub st_atime_nsec: ::c_long, - pub st_mtime: ::time_t, - pub st_mtime_nsec: ::c_long, - pub st_ctime: ::time_t, - pub st_ctime_nsec: ::c_long, - __unused: [::c_long; 3], - } - - pub struct stat64 { - pub st_dev: ::dev_t, - pub st_ino: ::ino64_t, - pub st_nlink: ::nlink_t, - pub st_mode: ::mode_t, - pub st_uid: ::uid_t, - pub st_gid: ::gid_t, - __pad0: ::c_int, - pub st_rdev: ::dev_t, - pub st_size: ::off_t, - pub st_blksize: ::blksize_t, - pub st_blocks: ::blkcnt64_t, - pub st_atime: ::time_t, - pub st_atime_nsec: ::c_long, - pub st_mtime: ::time_t, - pub st_mtime_nsec: ::c_long, - pub st_ctime: ::time_t, - pub st_ctime_nsec: ::c_long, - __reserved: [::c_long; 3], - } - - pub struct stack_t { - pub ss_sp: *mut ::c_void, - pub ss_flags: ::c_int, - pub ss_size: ::size_t - } - - pub struct pthread_attr_t { - __size: [u64; 7] - } - - pub struct sigset_t { - __val: [::c_ulong; 16], - } - - pub struct shmid_ds { - pub shm_perm: ::ipc_perm, - pub shm_segsz: ::size_t, - pub shm_atime: ::time_t, - pub shm_dtime: ::time_t, - pub shm_ctime: ::time_t, - pub shm_cpid: ::pid_t, - pub shm_lpid: ::pid_t, - pub shm_nattch: ::c_ulong, - __pad1: ::c_ulong, - __pad2: ::c_ulong, - } - - pub struct 
msqid_ds { - pub msg_perm: ::ipc_perm, - pub msg_stime: ::time_t, - pub msg_rtime: ::time_t, - pub msg_ctime: ::time_t, - __msg_cbytes: ::c_ulong, - pub msg_qnum: ::msgqnum_t, - pub msg_qbytes: ::msglen_t, - pub msg_lspid: ::pid_t, - pub msg_lrpid: ::pid_t, - __pad1: ::c_ulong, - __pad2: ::c_ulong, - } - - pub struct statfs { - pub f_type: ::c_ulong, - pub f_bsize: ::c_ulong, - pub f_blocks: ::fsblkcnt_t, - pub f_bfree: ::fsblkcnt_t, - pub f_bavail: ::fsblkcnt_t, - pub f_files: ::fsfilcnt_t, - pub f_ffree: ::fsfilcnt_t, - pub f_fsid: ::fsid_t, - pub f_namelen: ::c_ulong, - pub f_frsize: ::c_ulong, - pub f_flags: ::c_ulong, - pub f_spare: [::c_ulong; 4], - } - - pub struct msghdr { - pub msg_name: *mut ::c_void, - pub msg_namelen: ::socklen_t, - pub msg_iov: *mut ::iovec, - pub msg_iovlen: ::c_int, - __pad1: ::c_int, - pub msg_control: *mut ::c_void, - pub msg_controllen: ::socklen_t, - __pad2: ::socklen_t, - pub msg_flags: ::c_int, - } - - pub struct sem_t { - __val: [::c_int; 8], - } - - pub struct siginfo_t { - pub si_signo: ::c_int, - pub si_errno: ::c_int, - pub si_code: ::c_int, - pub _pad: [::c_int; 29], - _align: [usize; 0], - } -} - -pub const __SIZEOF_PTHREAD_RWLOCK_T: usize = 56; -pub const __SIZEOF_PTHREAD_MUTEX_T: usize = 40; - -pub const O_DIRECT: ::c_int = 0x4000; -pub const O_DIRECTORY: ::c_int = 0x10000; -pub const O_NOFOLLOW: ::c_int = 0x20000; -pub const O_ASYNC: ::c_int = 0x2000; - -pub const FIOCLEX: ::c_int = 0x5451; -pub const FIONBIO: ::c_int = 0x5421; - -pub const RLIMIT_RSS: ::c_int = 5; -pub const RLIMIT_NOFILE: ::c_int = 7; -pub const RLIMIT_AS: ::c_int = 9; -pub const RLIMIT_NPROC: ::c_int = 6; -pub const RLIMIT_MEMLOCK: ::c_int = 8; - -pub const O_APPEND: ::c_int = 1024; -pub const O_CREAT: ::c_int = 64; -pub const O_EXCL: ::c_int = 128; -pub const O_NOCTTY: ::c_int = 256; -pub const O_NONBLOCK: ::c_int = 2048; -pub const O_SYNC: ::c_int = 1052672; -pub const O_RSYNC: ::c_int = 1052672; -pub const O_DSYNC: ::c_int = 4096; - -pub const 
SOCK_NONBLOCK: ::c_int = 2048; - -pub const MAP_ANON: ::c_int = 0x0020; -pub const MAP_GROWSDOWN: ::c_int = 0x0100; -pub const MAP_DENYWRITE: ::c_int = 0x0800; -pub const MAP_EXECUTABLE: ::c_int = 0x01000; -pub const MAP_LOCKED: ::c_int = 0x02000; -pub const MAP_NORESERVE: ::c_int = 0x04000; -pub const MAP_POPULATE: ::c_int = 0x08000; -pub const MAP_NONBLOCK: ::c_int = 0x010000; -pub const MAP_STACK: ::c_int = 0x020000; -pub const MAP_32BIT: ::c_int = 0x0040; - -pub const SOCK_STREAM: ::c_int = 1; -pub const SOCK_DGRAM: ::c_int = 2; -pub const SOCK_SEQPACKET: ::c_int = 5; - -pub const SOL_SOCKET: ::c_int = 1; - -pub const EDEADLK: ::c_int = 35; -pub const ENAMETOOLONG: ::c_int = 36; -pub const ENOLCK: ::c_int = 37; -pub const ENOSYS: ::c_int = 38; -pub const ENOTEMPTY: ::c_int = 39; -pub const ELOOP: ::c_int = 40; -pub const ENOMSG: ::c_int = 42; -pub const EIDRM: ::c_int = 43; -pub const ECHRNG: ::c_int = 44; -pub const EL2NSYNC: ::c_int = 45; -pub const EL3HLT: ::c_int = 46; -pub const EL3RST: ::c_int = 47; -pub const ELNRNG: ::c_int = 48; -pub const EUNATCH: ::c_int = 49; -pub const ENOCSI: ::c_int = 50; -pub const EL2HLT: ::c_int = 51; -pub const EBADE: ::c_int = 52; -pub const EBADR: ::c_int = 53; -pub const EXFULL: ::c_int = 54; -pub const ENOANO: ::c_int = 55; -pub const EBADRQC: ::c_int = 56; -pub const EBADSLT: ::c_int = 57; -pub const EDEADLOCK: ::c_int = EDEADLK; -pub const EMULTIHOP: ::c_int = 72; -pub const EBADMSG: ::c_int = 74; -pub const EOVERFLOW: ::c_int = 75; -pub const ENOTUNIQ: ::c_int = 76; -pub const EBADFD: ::c_int = 77; -pub const EREMCHG: ::c_int = 78; -pub const ELIBACC: ::c_int = 79; -pub const ELIBBAD: ::c_int = 80; -pub const ELIBSCN: ::c_int = 81; -pub const ELIBMAX: ::c_int = 82; -pub const ELIBEXEC: ::c_int = 83; -pub const EILSEQ: ::c_int = 84; -pub const ERESTART: ::c_int = 85; -pub const ESTRPIPE: ::c_int = 86; -pub const EUSERS: ::c_int = 87; -pub const ENOTSOCK: ::c_int = 88; -pub const EDESTADDRREQ: ::c_int = 89; -pub const 
EMSGSIZE: ::c_int = 90; -pub const EPROTOTYPE: ::c_int = 91; -pub const ENOPROTOOPT: ::c_int = 92; -pub const EPROTONOSUPPORT: ::c_int = 93; -pub const ESOCKTNOSUPPORT: ::c_int = 94; -pub const EOPNOTSUPP: ::c_int = 95; -pub const ENOTSUP: ::c_int = EOPNOTSUPP; -pub const EPFNOSUPPORT: ::c_int = 96; -pub const EAFNOSUPPORT: ::c_int = 97; -pub const EADDRINUSE: ::c_int = 98; -pub const EADDRNOTAVAIL: ::c_int = 99; -pub const ENETDOWN: ::c_int = 100; -pub const ENETUNREACH: ::c_int = 101; -pub const ENETRESET: ::c_int = 102; -pub const ECONNABORTED: ::c_int = 103; -pub const ECONNRESET: ::c_int = 104; -pub const ENOBUFS: ::c_int = 105; -pub const EISCONN: ::c_int = 106; -pub const ENOTCONN: ::c_int = 107; -pub const ESHUTDOWN: ::c_int = 108; -pub const ETOOMANYREFS: ::c_int = 109; -pub const ETIMEDOUT: ::c_int = 110; -pub const ECONNREFUSED: ::c_int = 111; -pub const EHOSTDOWN: ::c_int = 112; -pub const EHOSTUNREACH: ::c_int = 113; -pub const EALREADY: ::c_int = 114; -pub const EINPROGRESS: ::c_int = 115; -pub const ESTALE: ::c_int = 116; -pub const EUCLEAN: ::c_int = 117; -pub const ENOTNAM: ::c_int = 118; -pub const ENAVAIL: ::c_int = 119; -pub const EISNAM: ::c_int = 120; -pub const EREMOTEIO: ::c_int = 121; -pub const EDQUOT: ::c_int = 122; -pub const ENOMEDIUM: ::c_int = 123; -pub const EMEDIUMTYPE: ::c_int = 124; -pub const ECANCELED: ::c_int = 125; -pub const ENOKEY: ::c_int = 126; -pub const EKEYEXPIRED: ::c_int = 127; -pub const EKEYREVOKED: ::c_int = 128; -pub const EKEYREJECTED: ::c_int = 129; -pub const EOWNERDEAD: ::c_int = 130; -pub const ENOTRECOVERABLE: ::c_int = 131; -pub const ERFKILL: ::c_int = 132; -pub const EHWPOISON: ::c_int = 133; - -pub const SO_REUSEADDR: ::c_int = 2; -pub const SO_TYPE: ::c_int = 3; -pub const SO_ERROR: ::c_int = 4; -pub const SO_DONTROUTE: ::c_int = 5; -pub const SO_BROADCAST: ::c_int = 6; -pub const SO_SNDBUF: ::c_int = 7; -pub const SO_RCVBUF: ::c_int = 8; -pub const SO_KEEPALIVE: ::c_int = 9; -pub const SO_OOBINLINE: 
::c_int = 10; -pub const SO_LINGER: ::c_int = 13; -pub const SO_REUSEPORT: ::c_int = 15; -pub const SO_RCVLOWAT: ::c_int = 18; -pub const SO_SNDLOWAT: ::c_int = 19; -pub const SO_RCVTIMEO: ::c_int = 20; -pub const SO_SNDTIMEO: ::c_int = 21; -pub const SO_ACCEPTCONN: ::c_int = 30; - -pub const SA_ONSTACK: ::c_int = 0x08000000; -pub const SA_SIGINFO: ::c_int = 0x00000004; -pub const SA_NOCLDWAIT: ::c_int = 0x00000002; - -pub const SIGCHLD: ::c_int = 17; -pub const SIGBUS: ::c_int = 7; -pub const SIGTTIN: ::c_int = 21; -pub const SIGTTOU: ::c_int = 22; -pub const SIGXCPU: ::c_int = 24; -pub const SIGXFSZ: ::c_int = 25; -pub const SIGVTALRM: ::c_int = 26; -pub const SIGPROF: ::c_int = 27; -pub const SIGWINCH: ::c_int = 28; -pub const SIGUSR1: ::c_int = 10; -pub const SIGUSR2: ::c_int = 12; -pub const SIGCONT: ::c_int = 18; -pub const SIGSTOP: ::c_int = 19; -pub const SIGTSTP: ::c_int = 20; -pub const SIGURG: ::c_int = 23; -pub const SIGIO: ::c_int = 29; -pub const SIGSYS: ::c_int = 31; -pub const SIGSTKFLT: ::c_int = 16; -pub const SIGPOLL: ::c_int = 29; -pub const SIGPWR: ::c_int = 30; -pub const SIG_SETMASK: ::c_int = 2; -pub const SIG_BLOCK: ::c_int = 0x000000; -pub const SIG_UNBLOCK: ::c_int = 0x01; - -pub const EXTPROC: ::tcflag_t = 0x00010000; - -pub const MAP_HUGETLB: ::c_int = 0x040000; - -pub const F_GETLK: ::c_int = 5; -pub const F_GETOWN: ::c_int = 9; -pub const F_SETLK: ::c_int = 6; -pub const F_SETLKW: ::c_int = 7; -pub const F_SETOWN: ::c_int = 8; - -pub const VEOF: usize = 4; -pub const VEOL: usize = 11; -pub const VEOL2: usize = 16; -pub const VMIN: usize = 6; -pub const IEXTEN: ::tcflag_t = 0x00008000; -pub const TOSTOP: ::tcflag_t = 0x00000100; -pub const FLUSHO: ::tcflag_t = 0x00001000; - -pub const TCGETS: ::c_int = 0x5401; -pub const TCSETS: ::c_int = 0x5402; -pub const TCSETSW: ::c_int = 0x5403; -pub const TCSETSF: ::c_int = 0x5404; -pub const TCGETA: ::c_int = 0x5405; -pub const TCSETA: ::c_int = 0x5406; -pub const TCSETAW: ::c_int = 0x5407; -pub 
const TCSETAF: ::c_int = 0x5408; -pub const TCSBRK: ::c_int = 0x5409; -pub const TCXONC: ::c_int = 0x540A; -pub const TCFLSH: ::c_int = 0x540B; -pub const TIOCGSOFTCAR: ::c_int = 0x5419; -pub const TIOCSSOFTCAR: ::c_int = 0x541A; -pub const TIOCLINUX: ::c_int = 0x541C; -pub const TIOCGSERIAL: ::c_int = 0x541E; -pub const TIOCEXCL: ::c_int = 0x540C; -pub const TIOCNXCL: ::c_int = 0x540D; -pub const TIOCSCTTY: ::c_int = 0x540E; -pub const TIOCGPGRP: ::c_int = 0x540F; -pub const TIOCSPGRP: ::c_int = 0x5410; -pub const TIOCOUTQ: ::c_int = 0x5411; -pub const TIOCSTI: ::c_int = 0x5412; -pub const TIOCGWINSZ: ::c_int = 0x5413; -pub const TIOCSWINSZ: ::c_int = 0x5414; -pub const TIOCMGET: ::c_int = 0x5415; -pub const TIOCMBIS: ::c_int = 0x5416; -pub const TIOCMBIC: ::c_int = 0x5417; -pub const TIOCMSET: ::c_int = 0x5418; -pub const FIONREAD: ::c_int = 0x541B; -pub const TIOCCONS: ::c_int = 0x541D; - -pub const POLLWRNORM: ::c_short = 0x100; -pub const POLLWRBAND: ::c_short = 0x200; - -cfg_if! { - if #[cfg(target_arch = "aarch64")] { - mod aarch64; - pub use self::aarch64::*; - } else if #[cfg(any(target_arch = "powerpc64"))] { - mod powerpc64; - pub use self::powerpc64::*; - } else if #[cfg(any(target_arch = "x86_64"))] { - mod x86_64; - pub use self::x86_64::*; - } else { - // Unknown target_arch - } -} diff -Nru cargo-0.17.0/vendor/libc-0.2.18/src/unix/notbsd/linux/musl/b64/powerpc64.rs cargo-0.19.0/vendor/libc-0.2.18/src/unix/notbsd/linux/musl/b64/powerpc64.rs --- cargo-0.17.0/vendor/libc-0.2.18/src/unix/notbsd/linux/musl/b64/powerpc64.rs 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/libc-0.2.18/src/unix/notbsd/linux/musl/b64/powerpc64.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,3 +0,0 @@ -pub type c_char = u8; - -pub const SYS_perf_event_open: ::c_long = 319; diff -Nru cargo-0.17.0/vendor/libc-0.2.18/src/unix/notbsd/linux/musl/b64/x86_64.rs cargo-0.19.0/vendor/libc-0.2.18/src/unix/notbsd/linux/musl/b64/x86_64.rs --- 
cargo-0.17.0/vendor/libc-0.2.18/src/unix/notbsd/linux/musl/b64/x86_64.rs 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/libc-0.2.18/src/unix/notbsd/linux/musl/b64/x86_64.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,20 +0,0 @@ -pub type c_char = i8; - -s! { - pub struct mcontext_t { - __private: [u64; 32], - } - - pub struct ucontext_t { - pub uc_flags: ::c_ulong, - pub uc_link: *mut ucontext_t, - pub uc_stack: ::stack_t, - pub uc_mcontext: mcontext_t, - pub uc_sigmask: ::sigset_t, - __private: [u8; 512], - } -} - -pub const SYS_gettid: ::c_long = 186; - -pub const SYS_perf_event_open: ::c_long = 298; diff -Nru cargo-0.17.0/vendor/libc-0.2.18/src/unix/notbsd/linux/musl/mod.rs cargo-0.19.0/vendor/libc-0.2.18/src/unix/notbsd/linux/musl/mod.rs --- cargo-0.17.0/vendor/libc-0.2.18/src/unix/notbsd/linux/musl/mod.rs 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/libc-0.2.18/src/unix/notbsd/linux/musl/mod.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,265 +0,0 @@ -pub type clock_t = c_long; -pub type time_t = c_long; -pub type suseconds_t = c_long; -pub type ino_t = u64; -pub type off_t = i64; -pub type blkcnt_t = i64; - -pub type blksize_t = c_long; -pub type fsblkcnt_t = ::c_ulonglong; -pub type fsfilcnt_t = ::c_ulonglong; -pub type rlim_t = ::c_ulonglong; - -s! 
{ - pub struct aiocb { - pub aio_fildes: ::c_int, - pub aio_lio_opcode: ::c_int, - pub aio_reqprio: ::c_int, - pub aio_buf: *mut ::c_void, - pub aio_nbytes: ::size_t, - pub aio_sigevent: ::sigevent, - __td: *mut ::c_void, - __lock: [::c_int; 2], - __err: ::c_int, - __ret: ::ssize_t, - pub aio_offset: off_t, - __next: *mut ::c_void, - __prev: *mut ::c_void, - #[cfg(target_pointer_width = "32")] - __dummy4: [::c_char; 24], - #[cfg(target_pointer_width = "64")] - __dummy4: [::c_char; 16], - } - - pub struct sigaction { - pub sa_sigaction: ::sighandler_t, - pub sa_mask: ::sigset_t, - pub sa_flags: ::c_int, - _restorer: *mut ::c_void, - } - - pub struct ipc_perm { - pub __ipc_perm_key: ::key_t, - pub uid: ::uid_t, - pub gid: ::gid_t, - pub cuid: ::uid_t, - pub cgid: ::gid_t, - pub mode: ::mode_t, - pub __seq: ::c_int, - __unused1: ::c_long, - __unused2: ::c_long - } - - pub struct termios { - pub c_iflag: ::tcflag_t, - pub c_oflag: ::tcflag_t, - pub c_cflag: ::tcflag_t, - pub c_lflag: ::tcflag_t, - pub c_line: ::cc_t, - pub c_cc: [::cc_t; ::NCCS], - pub __c_ispeed: ::speed_t, - pub __c_ospeed: ::speed_t, - } - - pub struct flock { - pub l_type: ::c_short, - pub l_whence: ::c_short, - pub l_start: ::off_t, - pub l_len: ::off_t, - pub l_pid: ::pid_t, - } - - pub struct sysinfo { - pub uptime: ::c_ulong, - pub loads: [::c_ulong; 3], - pub totalram: ::c_ulong, - pub freeram: ::c_ulong, - pub sharedram: ::c_ulong, - pub bufferram: ::c_ulong, - pub totalswap: ::c_ulong, - pub freeswap: ::c_ulong, - pub procs: ::c_ushort, - pub pad: ::c_ushort, - pub totalhigh: ::c_ulong, - pub freehigh: ::c_ulong, - pub mem_unit: ::c_uint, - pub __reserved: [::c_char; 256], - } -} - -pub const BUFSIZ: ::c_uint = 1024; -pub const TMP_MAX: ::c_uint = 10000; -pub const FOPEN_MAX: ::c_uint = 1000; -pub const O_ACCMODE: ::c_int = 0o10000003; -pub const O_NDELAY: ::c_int = O_NONBLOCK; -pub const NI_MAXHOST: ::socklen_t = 255; -pub const PTHREAD_STACK_MIN: ::size_t = 2048; -pub const 
POSIX_FADV_DONTNEED: ::c_int = 4; -pub const POSIX_FADV_NOREUSE: ::c_int = 5; - -pub const POSIX_MADV_DONTNEED: ::c_int = 4; - -pub const RLIM_INFINITY: ::rlim_t = !0; -pub const RLIMIT_RTTIME: ::c_int = 15; -pub const RLIMIT_NLIMITS: ::c_int = 16; - -pub const MAP_ANONYMOUS: ::c_int = MAP_ANON; - -pub const TCP_COOKIE_TRANSACTIONS: ::c_int = 15; -pub const TCP_THIN_LINEAR_TIMEOUTS: ::c_int = 16; -pub const TCP_THIN_DUPACK: ::c_int = 17; -pub const TCP_USER_TIMEOUT: ::c_int = 18; -pub const TCP_REPAIR: ::c_int = 19; -pub const TCP_REPAIR_QUEUE: ::c_int = 20; -pub const TCP_QUEUE_SEQ: ::c_int = 21; -pub const TCP_REPAIR_OPTIONS: ::c_int = 22; -pub const TCP_FASTOPEN: ::c_int = 23; -pub const TCP_TIMESTAMP: ::c_int = 24; - -pub const SIGUNUSED: ::c_int = ::SIGSYS; - -pub const FALLOC_FL_KEEP_SIZE: ::c_int = 0x01; -pub const FALLOC_FL_PUNCH_HOLE: ::c_int = 0x02; - -pub const __SIZEOF_PTHREAD_CONDATTR_T: usize = 4; -pub const __SIZEOF_PTHREAD_MUTEXATTR_T: usize = 4; - -pub const CPU_SETSIZE: ::c_int = 128; - -pub const QFMT_VFS_V1: ::c_int = 4; - -pub const PTRACE_TRACEME: ::c_int = 0; -pub const PTRACE_PEEKTEXT: ::c_int = 1; -pub const PTRACE_PEEKDATA: ::c_int = 2; -pub const PTRACE_PEEKUSER: ::c_int = 3; -pub const PTRACE_POKETEXT: ::c_int = 4; -pub const PTRACE_POKEDATA: ::c_int = 5; -pub const PTRACE_POKEUSER: ::c_int = 6; -pub const PTRACE_CONT: ::c_int = 7; -pub const PTRACE_KILL: ::c_int = 8; -pub const PTRACE_SINGLESTEP: ::c_int = 9; -pub const PTRACE_ATTACH: ::c_int = 16; -pub const PTRACE_DETACH: ::c_int = 17; -pub const PTRACE_SYSCALL: ::c_int = 24; -pub const PTRACE_SETOPTIONS: ::c_int = 0x4200; -pub const PTRACE_GETEVENTMSG: ::c_int = 0x4201; -pub const PTRACE_GETSIGINFO: ::c_int = 0x4202; -pub const PTRACE_SETSIGINFO: ::c_int = 0x4203; -pub const PTRACE_GETREGSET: ::c_int = 0x4204; -pub const PTRACE_SETREGSET: ::c_int = 0x4205; -pub const PTRACE_SEIZE: ::c_int = 0x4206; -pub const PTRACE_INTERRUPT: ::c_int = 0x4207; -pub const PTRACE_LISTEN: ::c_int = 
0x4208; -pub const PTRACE_PEEKSIGINFO: ::c_int = 0x4209; - -pub const PTRACE_O_EXITKILL: ::c_int = 1048576; -pub const PTRACE_O_TRACECLONE: ::c_int = 8; -pub const PTRACE_O_TRACEEXEC: ::c_int = 16; -pub const PTRACE_O_TRACEEXIT: ::c_int = 64; -pub const PTRACE_O_TRACEFORK: ::c_int = 2; -pub const PTRACE_O_TRACESYSGOOD: ::c_int = 1; -pub const PTRACE_O_TRACEVFORK: ::c_int = 4; -pub const PTRACE_O_TRACEVFORKDONE: ::c_int = 32; -pub const PTRACE_O_SUSPEND_SECCOMP: ::c_int = 2097152; - -pub const MADV_DODUMP: ::c_int = 17; -pub const MADV_DONTDUMP: ::c_int = 16; - -pub const EPOLLWAKEUP: ::c_int = 0x20000000; - -pub const POLLRDNORM: ::c_short = 0x040; -pub const POLLRDBAND: ::c_short = 0x080; - -pub const MADV_HUGEPAGE: ::c_int = 14; -pub const MADV_NOHUGEPAGE: ::c_int = 15; - -pub const PTRACE_GETFPREGS: ::c_uint = 14; -pub const PTRACE_SETFPREGS: ::c_uint = 15; -pub const PTRACE_GETFPXREGS: ::c_uint = 18; -pub const PTRACE_SETFPXREGS: ::c_uint = 19; -pub const PTRACE_GETREGS: ::c_uint = 12; -pub const PTRACE_SETREGS: ::c_uint = 13; - -pub const EFD_NONBLOCK: ::c_int = ::O_NONBLOCK; - -pub const SFD_NONBLOCK: ::c_int = ::O_NONBLOCK; - -pub const TCSANOW: ::c_int = 0; -pub const TCSADRAIN: ::c_int = 1; -pub const TCSAFLUSH: ::c_int = 2; - -pub const TIOCINQ: ::c_int = ::FIONREAD; - -pub const RTLD_GLOBAL: ::c_int = 0x100; -pub const RTLD_NOLOAD: ::c_int = 0x4; - -// TODO(#247) Temporarily musl-specific (available since musl 0.9.12 / Linux -// kernel 3.10). 
See also notbsd/mod.rs -pub const CLOCK_SGI_CYCLE: ::clockid_t = 10; -pub const CLOCK_TAI: ::clockid_t = 11; - -pub const MCL_CURRENT: ::c_int = 0x0001; -pub const MCL_FUTURE: ::c_int = 0x0002; - -pub const SIGSTKSZ: ::size_t = 8192; -pub const CBAUD: ::tcflag_t = 0o0010017; -pub const TAB1: ::c_int = 0x00000800; -pub const TAB2: ::c_int = 0x00001000; -pub const TAB3: ::c_int = 0x00001800; -pub const CR1: ::c_int = 0x00000200; -pub const CR2: ::c_int = 0x00000400; -pub const CR3: ::c_int = 0x00000600; -pub const FF1: ::c_int = 0x00008000; -pub const BS1: ::c_int = 0x00002000; -pub const VT1: ::c_int = 0x00004000; -pub const VWERASE: usize = 14; -pub const VREPRINT: usize = 12; -pub const VSUSP: usize = 10; -pub const VSTART: usize = 8; -pub const VSTOP: usize = 9; -pub const VDISCARD: usize = 13; -pub const VTIME: usize = 5; -pub const IXON: ::tcflag_t = 0x00000400; -pub const IXOFF: ::tcflag_t = 0x00001000; -pub const ONLCR: ::tcflag_t = 0x4; -pub const CSIZE: ::tcflag_t = 0x00000030; -pub const CS6: ::tcflag_t = 0x00000010; -pub const CS7: ::tcflag_t = 0x00000020; -pub const CS8: ::tcflag_t = 0x00000030; -pub const CSTOPB: ::tcflag_t = 0x00000040; -pub const CREAD: ::tcflag_t = 0x00000080; -pub const PARENB: ::tcflag_t = 0x00000100; -pub const PARODD: ::tcflag_t = 0x00000200; -pub const HUPCL: ::tcflag_t = 0x00000400; -pub const CLOCAL: ::tcflag_t = 0x00000800; -pub const ECHOKE: ::tcflag_t = 0x00000800; -pub const ECHOE: ::tcflag_t = 0x00000010; -pub const ECHOK: ::tcflag_t = 0x00000020; -pub const ECHONL: ::tcflag_t = 0x00000040; -pub const ECHOPRT: ::tcflag_t = 0x00000400; -pub const ECHOCTL: ::tcflag_t = 0x00000200; -pub const ISIG: ::tcflag_t = 0x00000001; -pub const ICANON: ::tcflag_t = 0x00000002; -pub const PENDIN: ::tcflag_t = 0x00004000; -pub const NOFLSH: ::tcflag_t = 0x00000080; - -extern { - pub fn ioctl(fd: ::c_int, request: ::c_int, ...) -> ::c_int; - pub fn ptrace(request: ::c_int, ...) 
-> ::c_long; - pub fn getpriority(which: ::c_int, who: ::id_t) -> ::c_int; - pub fn setpriority(which: ::c_int, who: ::id_t, prio: ::c_int) -> ::c_int; -} - -cfg_if! { - if #[cfg(any(target_arch = "x86_64", target_arch = "aarch64"))] { - mod b64; - pub use self::b64::*; - } else if #[cfg(any(target_arch = "x86", - target_arch = "mips", - target_arch = "arm", - target_arch = "asmjs", - target_arch = "wasm32"))] { - mod b32; - pub use self::b32::*; - } else { } -} diff -Nru cargo-0.17.0/vendor/libc-0.2.18/src/unix/notbsd/linux/other/b32/arm.rs cargo-0.19.0/vendor/libc-0.2.18/src/unix/notbsd/linux/other/b32/arm.rs --- cargo-0.17.0/vendor/libc-0.2.18/src/unix/notbsd/linux/other/b32/arm.rs 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/libc-0.2.18/src/unix/notbsd/linux/other/b32/arm.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,168 +0,0 @@ -pub type c_char = u8; -pub type wchar_t = u32; - -s! { - pub struct ipc_perm { - pub __key: ::key_t, - pub uid: ::uid_t, - pub gid: ::gid_t, - pub cuid: ::uid_t, - pub cgid: ::gid_t, - pub mode: ::c_ushort, - __pad1: ::c_ushort, - pub __seq: ::c_ushort, - __pad2: ::c_ushort, - __unused1: ::c_ulong, - __unused2: ::c_ulong - } - - pub struct stat64 { - pub st_dev: ::dev_t, - __pad1: ::c_uint, - __st_ino: ::ino_t, - pub st_mode: ::mode_t, - pub st_nlink: ::nlink_t, - pub st_uid: ::uid_t, - pub st_gid: ::gid_t, - pub st_rdev: ::dev_t, - __pad2: ::c_uint, - pub st_size: ::off64_t, - pub st_blksize: ::blksize_t, - pub st_blocks: ::blkcnt64_t, - pub st_atime: ::time_t, - pub st_atime_nsec: ::c_long, - pub st_mtime: ::time_t, - pub st_mtime_nsec: ::c_long, - pub st_ctime: ::time_t, - pub st_ctime_nsec: ::c_long, - pub st_ino: ::ino64_t, - } - - pub struct shmid_ds { - pub shm_perm: ::ipc_perm, - pub shm_segsz: ::size_t, - pub shm_atime: ::time_t, - __unused1: ::c_ulong, - pub shm_dtime: ::time_t, - __unused2: ::c_ulong, - pub shm_ctime: ::time_t, - __unused3: ::c_ulong, - pub shm_cpid: ::pid_t, - pub shm_lpid: ::pid_t, - pub 
shm_nattch: ::shmatt_t, - __unused4: ::c_ulong, - __unused5: ::c_ulong - } - - pub struct msqid_ds { - pub msg_perm: ::ipc_perm, - pub msg_stime: ::time_t, - __glibc_reserved1: ::c_ulong, - pub msg_rtime: ::time_t, - __glibc_reserved2: ::c_ulong, - pub msg_ctime: ::time_t, - __glibc_reserved3: ::c_ulong, - __msg_cbytes: ::c_ulong, - pub msg_qnum: ::msgqnum_t, - pub msg_qbytes: ::msglen_t, - pub msg_lspid: ::pid_t, - pub msg_lrpid: ::pid_t, - __glibc_reserved4: ::c_ulong, - __glibc_reserved5: ::c_ulong, - } -} - -pub const O_DIRECT: ::c_int = 0x10000; -pub const O_DIRECTORY: ::c_int = 0x4000; -pub const O_NOFOLLOW: ::c_int = 0x8000; - -pub const MAP_LOCKED: ::c_int = 0x02000; -pub const MAP_NORESERVE: ::c_int = 0x04000; - -pub const EDEADLOCK: ::c_int = 35; - -pub const SO_PEERCRED: ::c_int = 17; -pub const SO_RCVLOWAT: ::c_int = 18; -pub const SO_SNDLOWAT: ::c_int = 19; -pub const SO_RCVTIMEO: ::c_int = 20; -pub const SO_SNDTIMEO: ::c_int = 21; - -pub const FIOCLEX: ::c_ulong = 0x5451; -pub const FIONBIO: ::c_ulong = 0x5421; - -pub const SYS_gettid: ::c_long = 224; -pub const SYS_perf_event_open: ::c_long = 364; - -pub const PTRACE_GETFPXREGS: ::c_uint = 18; -pub const PTRACE_SETFPXREGS: ::c_uint = 19; - -pub const MCL_CURRENT: ::c_int = 0x0001; -pub const MCL_FUTURE: ::c_int = 0x0002; - -pub const SIGSTKSZ: ::size_t = 8192; -pub const CBAUD: ::tcflag_t = 0o0010017; -pub const TAB1: ::c_int = 0x00000800; -pub const TAB2: ::c_int = 0x00001000; -pub const TAB3: ::c_int = 0x00001800; -pub const CR1: ::c_int = 0x00000200; -pub const CR2: ::c_int = 0x00000400; -pub const CR3: ::c_int = 0x00000600; -pub const FF1: ::c_int = 0x00008000; -pub const BS1: ::c_int = 0x00002000; -pub const VT1: ::c_int = 0x00004000; -pub const VWERASE: usize = 14; -pub const VREPRINT: usize = 12; -pub const VSUSP: usize = 10; -pub const VSTART: usize = 8; -pub const VSTOP: usize = 9; -pub const VDISCARD: usize = 13; -pub const VTIME: usize = 5; -pub const IXON: ::tcflag_t = 0x00000400; -pub 
const IXOFF: ::tcflag_t = 0x00001000; -pub const ONLCR: ::tcflag_t = 0x4; -pub const CSIZE: ::tcflag_t = 0x00000030; -pub const CS6: ::tcflag_t = 0x00000010; -pub const CS7: ::tcflag_t = 0x00000020; -pub const CS8: ::tcflag_t = 0x00000030; -pub const CSTOPB: ::tcflag_t = 0x00000040; -pub const CREAD: ::tcflag_t = 0x00000080; -pub const PARENB: ::tcflag_t = 0x00000100; -pub const PARODD: ::tcflag_t = 0x00000200; -pub const HUPCL: ::tcflag_t = 0x00000400; -pub const CLOCAL: ::tcflag_t = 0x00000800; -pub const ECHOKE: ::tcflag_t = 0x00000800; -pub const ECHOE: ::tcflag_t = 0x00000010; -pub const ECHOK: ::tcflag_t = 0x00000020; -pub const ECHONL: ::tcflag_t = 0x00000040; -pub const ECHOPRT: ::tcflag_t = 0x00000400; -pub const ECHOCTL: ::tcflag_t = 0x00000200; -pub const ISIG: ::tcflag_t = 0x00000001; -pub const ICANON: ::tcflag_t = 0x00000002; -pub const PENDIN: ::tcflag_t = 0x00004000; -pub const NOFLSH: ::tcflag_t = 0x00000080; - -pub const VEOL: usize = 11; -pub const VEOL2: usize = 16; -pub const VMIN: usize = 6; -pub const IEXTEN: ::tcflag_t = 0x00008000; -pub const TOSTOP: ::tcflag_t = 0x00000100; -pub const FLUSHO: ::tcflag_t = 0x00001000; -pub const EXTPROC: ::tcflag_t = 0x00010000; -pub const TCGETS: ::c_ulong = 0x5401; -pub const TCSETS: ::c_ulong = 0x5402; -pub const TCSETSW: ::c_ulong = 0x5403; -pub const TCSETSF: ::c_ulong = 0x5404; -pub const TCGETA: ::c_ulong = 0x5405; -pub const TCSETA: ::c_ulong = 0x5406; -pub const TCSETAW: ::c_ulong = 0x5407; -pub const TCSETAF: ::c_ulong = 0x5408; -pub const TCSBRK: ::c_ulong = 0x5409; -pub const TCXONC: ::c_ulong = 0x540A; -pub const TCFLSH: ::c_ulong = 0x540B; -pub const TIOCINQ: ::c_ulong = 0x541B; -pub const TIOCGPGRP: ::c_ulong = 0x540F; -pub const TIOCSPGRP: ::c_ulong = 0x5410; -pub const TIOCOUTQ: ::c_ulong = 0x5411; -pub const TIOCGWINSZ: ::c_ulong = 0x5413; -pub const TIOCSWINSZ: ::c_ulong = 0x5414; -pub const FIONREAD: ::c_ulong = 0x541B; diff -Nru 
cargo-0.17.0/vendor/libc-0.2.18/src/unix/notbsd/linux/other/b32/mod.rs cargo-0.19.0/vendor/libc-0.2.18/src/unix/notbsd/linux/other/b32/mod.rs --- cargo-0.17.0/vendor/libc-0.2.18/src/unix/notbsd/linux/other/b32/mod.rs 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/libc-0.2.18/src/unix/notbsd/linux/other/b32/mod.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,89 +0,0 @@ -//! 32-bit specific definitions for linux-like values - -pub type c_long = i32; -pub type c_ulong = u32; -pub type clock_t = i32; -pub type time_t = i32; -pub type suseconds_t = i32; -pub type ino_t = u32; -pub type off_t = i32; -pub type blkcnt_t = i32; -pub type __fsword_t = i32; - -pub type blksize_t = i32; -pub type nlink_t = u32; - -s! { - pub struct stat { - pub st_dev: ::dev_t, - __pad1: ::c_short, - pub st_ino: ::ino_t, - pub st_mode: ::mode_t, - pub st_nlink: ::nlink_t, - pub st_uid: ::uid_t, - pub st_gid: ::gid_t, - pub st_rdev: ::dev_t, - __pad2: ::c_short, - pub st_size: ::off_t, - pub st_blksize: ::blksize_t, - pub st_blocks: ::blkcnt_t, - pub st_atime: ::time_t, - pub st_atime_nsec: ::c_long, - pub st_mtime: ::time_t, - pub st_mtime_nsec: ::c_long, - pub st_ctime: ::time_t, - pub st_ctime_nsec: ::c_long, - __unused4: ::c_long, - __unused5: ::c_long, - } - - pub struct pthread_attr_t { - __size: [u32; 9] - } - - pub struct sigset_t { - __val: [::c_ulong; 32], - } - - pub struct sysinfo { - pub uptime: ::c_long, - pub loads: [::c_ulong; 3], - pub totalram: ::c_ulong, - pub freeram: ::c_ulong, - pub sharedram: ::c_ulong, - pub bufferram: ::c_ulong, - pub totalswap: ::c_ulong, - pub freeswap: ::c_ulong, - pub procs: ::c_ushort, - pub pad: ::c_ushort, - pub totalhigh: ::c_ulong, - pub freehigh: ::c_ulong, - pub mem_unit: ::c_uint, - pub _f: [::c_char; 8], - } -} - -pub const __SIZEOF_PTHREAD_CONDATTR_T: usize = 4; -pub const __SIZEOF_PTHREAD_MUTEX_T: usize = 24; -pub const __SIZEOF_PTHREAD_RWLOCK_T: usize = 32; -pub const __SIZEOF_PTHREAD_MUTEXATTR_T: usize = 4; - -pub const 
PTRACE_GETFPREGS: ::c_uint = 14; -pub const PTRACE_SETFPREGS: ::c_uint = 15; -pub const PTRACE_GETREGS: ::c_uint = 12; -pub const PTRACE_SETREGS: ::c_uint = 13; - -cfg_if! { - if #[cfg(target_arch = "x86")] { - mod x86; - pub use self::x86::*; - } else if #[cfg(target_arch = "arm")] { - mod arm; - pub use self::arm::*; - } else if #[cfg(target_arch = "powerpc")] { - mod powerpc; - pub use self::powerpc::*; - } else { - // Unknown target_arch - } -} diff -Nru cargo-0.17.0/vendor/libc-0.2.18/src/unix/notbsd/linux/other/b32/powerpc.rs cargo-0.19.0/vendor/libc-0.2.18/src/unix/notbsd/linux/other/b32/powerpc.rs --- cargo-0.17.0/vendor/libc-0.2.18/src/unix/notbsd/linux/other/b32/powerpc.rs 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/libc-0.2.18/src/unix/notbsd/linux/other/b32/powerpc.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,165 +0,0 @@ -pub type c_char = u8; -pub type wchar_t = i32; - -s! { - pub struct ipc_perm { - __key: ::key_t, - pub uid: ::uid_t, - pub gid: ::gid_t, - pub cuid: ::uid_t, - pub cgid: ::gid_t, - pub mode: ::mode_t, - __seq: ::uint32_t, - __pad1: ::uint32_t, - __glibc_reserved1: ::uint64_t, - __glibc_reserved2: ::uint64_t, - } - - pub struct stat64 { - pub st_dev: ::dev_t, - pub st_ino: ::ino64_t, - pub st_mode: ::mode_t, - pub st_nlink: ::nlink_t, - pub st_uid: ::uid_t, - pub st_gid: ::gid_t, - pub st_rdev: ::dev_t, - __pad2: ::c_ushort, - pub st_size: ::off64_t, - pub st_blksize: ::blksize_t, - pub st_blocks: ::blkcnt64_t, - pub st_atime: ::time_t, - pub st_atime_nsec: ::c_long, - pub st_mtime: ::time_t, - pub st_mtime_nsec: ::c_long, - pub st_ctime: ::time_t, - pub st_ctime_nsec: ::c_long, - __glibc_reserved4: ::c_ulong, - __glibc_reserved5: ::c_ulong, - } - - pub struct shmid_ds { - pub shm_perm: ::ipc_perm, - __glibc_reserved1: ::c_uint, - pub shm_atime: ::time_t, - __glibc_reserved2: ::c_uint, - pub shm_dtime: ::time_t, - __glibc_reserved3: ::c_uint, - pub shm_ctime: ::time_t, - __glibc_reserved4: ::c_uint, - pub shm_segsz: 
::size_t, - pub shm_cpid: ::pid_t, - pub shm_lpid: ::pid_t, - pub shm_nattch: ::shmatt_t, - __glibc_reserved5: ::c_ulong, - __glibc_reserved6: ::c_ulong, - } - - pub struct msqid_ds { - pub msg_perm: ::ipc_perm, - __glibc_reserved1: ::c_uint, - pub msg_stime: ::time_t, - __glibc_reserved2: ::c_uint, - pub msg_rtime: ::time_t, - __glibc_reserved3: ::c_uint, - pub msg_ctime: ::time_t, - __msg_cbytes: ::c_ulong, - pub msg_qnum: ::msgqnum_t, - pub msg_qbytes: ::msglen_t, - pub msg_lspid: ::pid_t, - pub msg_lrpid: ::pid_t, - __glibc_reserved4: ::c_ulong, - __glibc_reserved5: ::c_ulong, - } -} - -pub const O_DIRECT: ::c_int = 0x20000; -pub const O_DIRECTORY: ::c_int = 0x4000; -pub const O_NOFOLLOW: ::c_int = 0x8000; - -pub const MAP_LOCKED: ::c_int = 0x00080; -pub const MAP_NORESERVE: ::c_int = 0x00040; - -pub const EDEADLOCK: ::c_int = 58; - -pub const SO_PEERCRED: ::c_int = 21; -pub const SO_RCVLOWAT: ::c_int = 16; -pub const SO_SNDLOWAT: ::c_int = 17; -pub const SO_RCVTIMEO: ::c_int = 18; -pub const SO_SNDTIMEO: ::c_int = 19; - -pub const FIOCLEX: ::c_ulong = 0x20006601; -pub const FIONBIO: ::c_ulong = 0x8004667e; - -pub const SYS_gettid: ::c_long = 207; -pub const SYS_perf_event_open: ::c_long = 319; - -pub const MCL_CURRENT: ::c_int = 0x2000; -pub const MCL_FUTURE: ::c_int = 0x4000; - -pub const SIGSTKSZ: ::size_t = 0x4000; -pub const CBAUD: ::tcflag_t = 0xff; -pub const TAB1: ::c_int = 0x400; -pub const TAB2: ::c_int = 0x800; -pub const TAB3: ::c_int = 0xc00; -pub const CR1: ::c_int = 0x1000; -pub const CR2: ::c_int = 0x2000; -pub const CR3: ::c_int = 0x3000; -pub const FF1: ::c_int = 0x4000; -pub const BS1: ::c_int = 0x8000; -pub const VT1: ::c_int = 0x10000; -pub const VWERASE: usize = 0xa; -pub const VREPRINT: usize = 0xb; -pub const VSUSP: usize = 0xc; -pub const VSTART: usize = 0xd; -pub const VSTOP: usize = 0xe; -pub const VDISCARD: usize = 0x10; -pub const VTIME: usize = 0x7; -pub const IXON: ::tcflag_t = 0x200; -pub const IXOFF: ::tcflag_t = 0x400; -pub 
const ONLCR: ::tcflag_t = 0x2; -pub const CSIZE: ::tcflag_t = 0x300; -pub const CS6: ::tcflag_t = 0x100; -pub const CS7: ::tcflag_t = 0x200; -pub const CS8: ::tcflag_t = 0x300; -pub const CSTOPB: ::tcflag_t = 0x400; -pub const CREAD: ::tcflag_t = 0x800; -pub const PARENB: ::tcflag_t = 0x1000; -pub const PARODD: ::tcflag_t = 0x2000; -pub const HUPCL: ::tcflag_t = 0x4000; -pub const CLOCAL: ::tcflag_t = 0x8000; -pub const ECHOKE: ::tcflag_t = 0x1; -pub const ECHOE: ::tcflag_t = 0x2; -pub const ECHOK: ::tcflag_t = 0x4; -pub const ECHONL: ::tcflag_t = 0x10; -pub const ECHOPRT: ::tcflag_t = 0x20; -pub const ECHOCTL: ::tcflag_t = 0x40; -pub const ISIG: ::tcflag_t = 0x80; -pub const ICANON: ::tcflag_t = 0x100; -pub const PENDIN: ::tcflag_t = 0x20000000; -pub const NOFLSH: ::tcflag_t = 0x80000000; - -pub const VEOL: usize = 6; -pub const VEOL2: usize = 8; -pub const VMIN: usize = 5; -pub const IEXTEN: ::tcflag_t = 0x400; -pub const TOSTOP: ::tcflag_t = 0x400000; -pub const FLUSHO: ::tcflag_t = 0x800000; -pub const EXTPROC: ::tcflag_t = 0x10000000; -pub const TCGETS: ::c_ulong = 0x403c7413; -pub const TCSETS: ::c_ulong = 0x803c7414; -pub const TCSETSW: ::c_ulong = 0x803c7415; -pub const TCSETSF: ::c_ulong = 0x803c7416; -pub const TCGETA: ::c_ulong = 0x40147417; -pub const TCSETA: ::c_ulong = 0x80147418; -pub const TCSETAW: ::c_ulong = 0x80147419; -pub const TCSETAF: ::c_ulong = 0x8014741c; -pub const TCSBRK: ::c_ulong = 0x2000741d; -pub const TCXONC: ::c_ulong = 0x2000741e; -pub const TCFLSH: ::c_ulong = 0x2000741f; -pub const TIOCINQ: ::c_ulong = 0x4004667f; -pub const TIOCGPGRP: ::c_ulong = 0x40047477; -pub const TIOCSPGRP: ::c_ulong = 0x80047476; -pub const TIOCOUTQ: ::c_ulong = 0x40047473; -pub const TIOCGWINSZ: ::c_ulong = 0x40087468; -pub const TIOCSWINSZ: ::c_ulong = 0x80087467; -pub const FIONREAD: ::c_ulong = 0x4004667f; diff -Nru cargo-0.17.0/vendor/libc-0.2.18/src/unix/notbsd/linux/other/b32/x86.rs 
cargo-0.19.0/vendor/libc-0.2.18/src/unix/notbsd/linux/other/b32/x86.rs --- cargo-0.17.0/vendor/libc-0.2.18/src/unix/notbsd/linux/other/b32/x86.rs 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/libc-0.2.18/src/unix/notbsd/linux/other/b32/x86.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,213 +0,0 @@ -pub type c_char = i8; -pub type wchar_t = i32; -pub type greg_t = i32; - -s! { - pub struct _libc_fpreg { - pub significand: [u16; 4], - pub exponent: u16, - } - - pub struct _libc_fpstate { - pub cw: ::c_ulong, - pub sw: ::c_ulong, - pub tag: ::c_ulong, - pub ipoff: ::c_ulong, - pub cssel: ::c_ulong, - pub dataoff: ::c_ulong, - pub datasel: ::c_ulong, - pub _st: [_libc_fpreg; 8], - pub status: ::c_ulong, - } - - pub struct mcontext_t { - pub gregs: [greg_t; 19], - pub fpregs: *mut _libc_fpstate, - pub oldmask: ::c_ulong, - pub cr2: ::c_ulong, - } - - pub struct ucontext_t { - pub uc_flags: ::c_ulong, - pub uc_link: *mut ucontext_t, - pub uc_stack: ::stack_t, - pub uc_mcontext: mcontext_t, - pub uc_sigmask: ::sigset_t, - __private: [u8; 112], - } - - pub struct ipc_perm { - pub __key: ::key_t, - pub uid: ::uid_t, - pub gid: ::gid_t, - pub cuid: ::uid_t, - pub cgid: ::gid_t, - pub mode: ::c_ushort, - __pad1: ::c_ushort, - pub __seq: ::c_ushort, - __pad2: ::c_ushort, - __unused1: ::c_ulong, - __unused2: ::c_ulong - } - - pub struct stat64 { - pub st_dev: ::dev_t, - __pad1: ::c_uint, - __st_ino: ::ino_t, - pub st_mode: ::mode_t, - pub st_nlink: ::nlink_t, - pub st_uid: ::uid_t, - pub st_gid: ::gid_t, - pub st_rdev: ::dev_t, - __pad2: ::c_uint, - pub st_size: ::off64_t, - pub st_blksize: ::blksize_t, - pub st_blocks: ::blkcnt64_t, - pub st_atime: ::time_t, - pub st_atime_nsec: ::c_long, - pub st_mtime: ::time_t, - pub st_mtime_nsec: ::c_long, - pub st_ctime: ::time_t, - pub st_ctime_nsec: ::c_long, - pub st_ino: ::ino64_t, - } - - pub struct shmid_ds { - pub shm_perm: ::ipc_perm, - pub shm_segsz: ::size_t, - pub shm_atime: ::time_t, - __unused1: ::c_ulong, - pub 
shm_dtime: ::time_t, - __unused2: ::c_ulong, - pub shm_ctime: ::time_t, - __unused3: ::c_ulong, - pub shm_cpid: ::pid_t, - pub shm_lpid: ::pid_t, - pub shm_nattch: ::shmatt_t, - __unused4: ::c_ulong, - __unused5: ::c_ulong - } - - pub struct msqid_ds { - pub msg_perm: ::ipc_perm, - pub msg_stime: ::time_t, - __glibc_reserved1: ::c_ulong, - pub msg_rtime: ::time_t, - __glibc_reserved2: ::c_ulong, - pub msg_ctime: ::time_t, - __glibc_reserved3: ::c_ulong, - __msg_cbytes: ::c_ulong, - pub msg_qnum: ::msgqnum_t, - pub msg_qbytes: ::msglen_t, - pub msg_lspid: ::pid_t, - pub msg_lrpid: ::pid_t, - __glibc_reserved4: ::c_ulong, - __glibc_reserved5: ::c_ulong, - } -} - -pub const O_DIRECT: ::c_int = 0x4000; -pub const O_DIRECTORY: ::c_int = 0x10000; -pub const O_NOFOLLOW: ::c_int = 0x20000; - -pub const MAP_LOCKED: ::c_int = 0x02000; -pub const MAP_NORESERVE: ::c_int = 0x04000; -pub const MAP_32BIT: ::c_int = 0x0040; - -pub const EDEADLOCK: ::c_int = 35; - -pub const SO_PEERCRED: ::c_int = 17; -pub const SO_RCVLOWAT: ::c_int = 18; -pub const SO_SNDLOWAT: ::c_int = 19; -pub const SO_RCVTIMEO: ::c_int = 20; -pub const SO_SNDTIMEO: ::c_int = 21; - -pub const FIOCLEX: ::c_ulong = 0x5451; -pub const FIONBIO: ::c_ulong = 0x5421; - -pub const SYS_gettid: ::c_long = 224; -pub const SYS_perf_event_open: ::c_long = 336; - -pub const PTRACE_GETFPXREGS: ::c_uint = 18; -pub const PTRACE_SETFPXREGS: ::c_uint = 19; - -pub const MCL_CURRENT: ::c_int = 0x0001; -pub const MCL_FUTURE: ::c_int = 0x0002; - -pub const SIGSTKSZ: ::size_t = 8192; -pub const CBAUD: ::tcflag_t = 0o0010017; -pub const TAB1: ::c_int = 0x00000800; -pub const TAB2: ::c_int = 0x00001000; -pub const TAB3: ::c_int = 0x00001800; -pub const CR1: ::c_int = 0x00000200; -pub const CR2: ::c_int = 0x00000400; -pub const CR3: ::c_int = 0x00000600; -pub const FF1: ::c_int = 0x00008000; -pub const BS1: ::c_int = 0x00002000; -pub const VT1: ::c_int = 0x00004000; -pub const VWERASE: usize = 14; -pub const VREPRINT: usize = 12; -pub 
const VSUSP: usize = 10; -pub const VSTART: usize = 8; -pub const VSTOP: usize = 9; -pub const VDISCARD: usize = 13; -pub const VTIME: usize = 5; -pub const IXON: ::tcflag_t = 0x00000400; -pub const IXOFF: ::tcflag_t = 0x00001000; -pub const ONLCR: ::tcflag_t = 0x4; -pub const CSIZE: ::tcflag_t = 0x00000030; -pub const CS6: ::tcflag_t = 0x00000010; -pub const CS7: ::tcflag_t = 0x00000020; -pub const CS8: ::tcflag_t = 0x00000030; -pub const CSTOPB: ::tcflag_t = 0x00000040; -pub const CREAD: ::tcflag_t = 0x00000080; -pub const PARENB: ::tcflag_t = 0x00000100; -pub const PARODD: ::tcflag_t = 0x00000200; -pub const HUPCL: ::tcflag_t = 0x00000400; -pub const CLOCAL: ::tcflag_t = 0x00000800; -pub const ECHOKE: ::tcflag_t = 0x00000800; -pub const ECHOE: ::tcflag_t = 0x00000010; -pub const ECHOK: ::tcflag_t = 0x00000020; -pub const ECHONL: ::tcflag_t = 0x00000040; -pub const ECHOPRT: ::tcflag_t = 0x00000400; -pub const ECHOCTL: ::tcflag_t = 0x00000200; -pub const ISIG: ::tcflag_t = 0x00000001; -pub const ICANON: ::tcflag_t = 0x00000002; -pub const PENDIN: ::tcflag_t = 0x00004000; -pub const NOFLSH: ::tcflag_t = 0x00000080; - -pub const VEOL: usize = 11; -pub const VEOL2: usize = 16; -pub const VMIN: usize = 6; -pub const IEXTEN: ::tcflag_t = 0x00008000; -pub const TOSTOP: ::tcflag_t = 0x00000100; -pub const FLUSHO: ::tcflag_t = 0x00001000; -pub const EXTPROC: ::tcflag_t = 0x00010000; -pub const TCGETS: ::c_ulong = 0x5401; -pub const TCSETS: ::c_ulong = 0x5402; -pub const TCSETSW: ::c_ulong = 0x5403; -pub const TCSETSF: ::c_ulong = 0x5404; -pub const TCGETA: ::c_ulong = 0x5405; -pub const TCSETA: ::c_ulong = 0x5406; -pub const TCSETAW: ::c_ulong = 0x5407; -pub const TCSETAF: ::c_ulong = 0x5408; -pub const TCSBRK: ::c_ulong = 0x5409; -pub const TCXONC: ::c_ulong = 0x540A; -pub const TCFLSH: ::c_ulong = 0x540B; -pub const TIOCINQ: ::c_ulong = 0x541B; -pub const TIOCGPGRP: ::c_ulong = 0x540F; -pub const TIOCSPGRP: ::c_ulong = 0x5410; -pub const TIOCOUTQ: ::c_ulong = 0x5411; 
-pub const TIOCGWINSZ: ::c_ulong = 0x5413; -pub const TIOCSWINSZ: ::c_ulong = 0x5414; -pub const FIONREAD: ::c_ulong = 0x541B; - -extern { - pub fn getcontext(ucp: *mut ucontext_t) -> ::c_int; - pub fn setcontext(ucp: *const ucontext_t) -> ::c_int; - pub fn makecontext(ucp: *mut ucontext_t, - func: extern fn (), - argc: ::c_int, ...); - pub fn swapcontext(uocp: *mut ucontext_t, - ucp: *const ucontext_t) -> ::c_int; -} diff -Nru cargo-0.17.0/vendor/libc-0.2.18/src/unix/notbsd/linux/other/b64/aarch64.rs cargo-0.19.0/vendor/libc-0.2.18/src/unix/notbsd/linux/other/b64/aarch64.rs --- cargo-0.17.0/vendor/libc-0.2.18/src/unix/notbsd/linux/other/b64/aarch64.rs 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/libc-0.2.18/src/unix/notbsd/linux/other/b64/aarch64.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,178 +0,0 @@ -//! AArch64-specific definitions for 64-bit linux-like values - -pub type c_char = u8; -pub type wchar_t = u32; -pub type nlink_t = u32; -pub type blksize_t = i32; - -s! { - pub struct stat { - pub st_dev: ::dev_t, - pub st_ino: ::ino_t, - pub st_mode: ::mode_t, - pub st_nlink: ::nlink_t, - pub st_uid: ::uid_t, - pub st_gid: ::gid_t, - pub st_rdev: ::dev_t, - __pad1: ::dev_t, - pub st_size: ::off_t, - pub st_blksize: ::blksize_t, - __pad2: ::c_int, - pub st_blocks: ::blkcnt_t, - pub st_atime: ::time_t, - pub st_atime_nsec: ::c_long, - pub st_mtime: ::time_t, - pub st_mtime_nsec: ::c_long, - pub st_ctime: ::time_t, - pub st_ctime_nsec: ::c_long, - __unused: [::c_int; 2], - } - - pub struct stat64 { - pub st_dev: ::dev_t, - pub st_ino: ::ino_t, - pub st_mode: ::mode_t, - pub st_nlink: ::nlink_t, - pub st_uid: ::uid_t, - pub st_gid: ::gid_t, - pub st_rdev: ::dev_t, - __pad1: ::dev_t, - pub st_size: ::off64_t, - pub st_blksize: ::blksize_t, - __pad2: ::c_int, - pub st_blocks: ::blkcnt64_t, - pub st_atime: ::time_t, - pub st_atime_nsec: ::c_long, - pub st_mtime: ::time_t, - pub st_mtime_nsec: ::c_long, - pub st_ctime: ::time_t, - pub st_ctime_nsec: 
::c_long, - __unused: [::c_int; 2], - } - - pub struct pthread_attr_t { - __size: [u64; 8] - } - - pub struct ipc_perm { - pub __key: ::key_t, - pub uid: ::uid_t, - pub gid: ::gid_t, - pub cuid: ::uid_t, - pub cgid: ::gid_t, - pub mode: ::c_uint, - pub __seq: ::c_ushort, - __pad1: ::c_ushort, - __unused1: ::c_ulong, - __unused2: ::c_ulong - } - - pub struct shmid_ds { - pub shm_perm: ::ipc_perm, - pub shm_segsz: ::size_t, - pub shm_atime: ::time_t, - pub shm_dtime: ::time_t, - pub shm_ctime: ::time_t, - pub shm_cpid: ::pid_t, - pub shm_lpid: ::pid_t, - pub shm_nattch: ::shmatt_t, - __unused4: ::c_ulong, - __unused5: ::c_ulong - } -} - -pub const __SIZEOF_PTHREAD_CONDATTR_T: usize = 8; -pub const __SIZEOF_PTHREAD_MUTEX_T: usize = 48; -pub const __SIZEOF_PTHREAD_MUTEXATTR_T: usize = 8; - -pub const O_DIRECT: ::c_int = 0x10000; -pub const O_DIRECTORY: ::c_int = 0x4000; -pub const O_NOFOLLOW: ::c_int = 0x8000; - -pub const MAP_LOCKED: ::c_int = 0x02000; -pub const MAP_NORESERVE: ::c_int = 0x04000; - -pub const EDEADLOCK: ::c_int = 35; - -pub const SO_PEERCRED: ::c_int = 17; -pub const SO_RCVLOWAT: ::c_int = 18; -pub const SO_SNDLOWAT: ::c_int = 19; -pub const SO_RCVTIMEO: ::c_int = 20; -pub const SO_SNDTIMEO: ::c_int = 21; - -pub const FIOCLEX: ::c_ulong = 0x5451; -pub const FIONBIO: ::c_ulong = 0x5421; - -pub const SYS_gettid: ::c_long = 178; -pub const SYS_perf_event_open: ::c_long = 241; - -pub const MCL_CURRENT: ::c_int = 0x0001; -pub const MCL_FUTURE: ::c_int = 0x0002; - -pub const SIGSTKSZ: ::size_t = 16384; -pub const CBAUD: ::tcflag_t = 0o0010017; -pub const TAB1: ::c_int = 0x00000800; -pub const TAB2: ::c_int = 0x00001000; -pub const TAB3: ::c_int = 0x00001800; -pub const CR1: ::c_int = 0x00000200; -pub const CR2: ::c_int = 0x00000400; -pub const CR3: ::c_int = 0x00000600; -pub const FF1: ::c_int = 0x00008000; -pub const BS1: ::c_int = 0x00002000; -pub const VT1: ::c_int = 0x00004000; -pub const VWERASE: usize = 14; -pub const VREPRINT: usize = 12; -pub const 
VSUSP: usize = 10; -pub const VSTART: usize = 8; -pub const VSTOP: usize = 9; -pub const VDISCARD: usize = 13; -pub const VTIME: usize = 5; -pub const IXON: ::tcflag_t = 0x00000400; -pub const IXOFF: ::tcflag_t = 0x00001000; -pub const ONLCR: ::tcflag_t = 0x4; -pub const CSIZE: ::tcflag_t = 0x00000030; -pub const CS6: ::tcflag_t = 0x00000010; -pub const CS7: ::tcflag_t = 0x00000020; -pub const CS8: ::tcflag_t = 0x00000030; -pub const CSTOPB: ::tcflag_t = 0x00000040; -pub const CREAD: ::tcflag_t = 0x00000080; -pub const PARENB: ::tcflag_t = 0x00000100; -pub const PARODD: ::tcflag_t = 0x00000200; -pub const HUPCL: ::tcflag_t = 0x00000400; -pub const CLOCAL: ::tcflag_t = 0x00000800; -pub const ECHOKE: ::tcflag_t = 0x00000800; -pub const ECHOE: ::tcflag_t = 0x00000010; -pub const ECHOK: ::tcflag_t = 0x00000020; -pub const ECHONL: ::tcflag_t = 0x00000040; -pub const ECHOPRT: ::tcflag_t = 0x00000400; -pub const ECHOCTL: ::tcflag_t = 0x00000200; -pub const ISIG: ::tcflag_t = 0x00000001; -pub const ICANON: ::tcflag_t = 0x00000002; -pub const PENDIN: ::tcflag_t = 0x00004000; -pub const NOFLSH: ::tcflag_t = 0x00000080; - -pub const VEOL: usize = 11; -pub const VEOL2: usize = 16; -pub const VMIN: usize = 6; -pub const IEXTEN: ::tcflag_t = 0x00008000; -pub const TOSTOP: ::tcflag_t = 0x00000100; -pub const FLUSHO: ::tcflag_t = 0x00001000; -pub const EXTPROC: ::tcflag_t = 0x00010000; -pub const TCGETS: ::c_ulong = 0x5401; -pub const TCSETS: ::c_ulong = 0x5402; -pub const TCSETSW: ::c_ulong = 0x5403; -pub const TCSETSF: ::c_ulong = 0x5404; -pub const TCGETA: ::c_ulong = 0x5405; -pub const TCSETA: ::c_ulong = 0x5406; -pub const TCSETAW: ::c_ulong = 0x5407; -pub const TCSETAF: ::c_ulong = 0x5408; -pub const TCSBRK: ::c_ulong = 0x5409; -pub const TCXONC: ::c_ulong = 0x540A; -pub const TCFLSH: ::c_ulong = 0x540B; -pub const TIOCINQ: ::c_ulong = 0x541B; -pub const TIOCGPGRP: ::c_ulong = 0x540F; -pub const TIOCSPGRP: ::c_ulong = 0x5410; -pub const TIOCOUTQ: ::c_ulong = 0x5411; -pub 
const TIOCGWINSZ: ::c_ulong = 0x5413; -pub const TIOCSWINSZ: ::c_ulong = 0x5414; -pub const FIONREAD: ::c_ulong = 0x541B; diff -Nru cargo-0.17.0/vendor/libc-0.2.18/src/unix/notbsd/linux/other/b64/mod.rs cargo-0.19.0/vendor/libc-0.2.18/src/unix/notbsd/linux/other/b64/mod.rs --- cargo-0.17.0/vendor/libc-0.2.18/src/unix/notbsd/linux/other/b64/mod.rs 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/libc-0.2.18/src/unix/notbsd/linux/other/b64/mod.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,65 +0,0 @@ -//! 64-bit specific definitions for linux-like values - -pub type c_long = i64; -pub type c_ulong = u64; -pub type clock_t = i64; -pub type time_t = i64; -pub type suseconds_t = i64; -pub type ino_t = u64; -pub type off_t = i64; -pub type blkcnt_t = i64; -pub type __fsword_t = ::c_long; - -s! { - pub struct sigset_t { - __val: [::c_ulong; 16], - } - - pub struct sysinfo { - pub uptime: ::c_long, - pub loads: [::c_ulong; 3], - pub totalram: ::c_ulong, - pub freeram: ::c_ulong, - pub sharedram: ::c_ulong, - pub bufferram: ::c_ulong, - pub totalswap: ::c_ulong, - pub freeswap: ::c_ulong, - pub procs: ::c_ushort, - pub pad: ::c_ushort, - pub totalhigh: ::c_ulong, - pub freehigh: ::c_ulong, - pub mem_unit: ::c_uint, - pub _f: [::c_char; 0], - } - - pub struct msqid_ds { - pub msg_perm: ::ipc_perm, - pub msg_stime: ::time_t, - pub msg_rtime: ::time_t, - pub msg_ctime: ::time_t, - __msg_cbytes: ::c_ulong, - pub msg_qnum: ::msgqnum_t, - pub msg_qbytes: ::msglen_t, - pub msg_lspid: ::pid_t, - pub msg_lrpid: ::pid_t, - __glibc_reserved4: ::c_ulong, - __glibc_reserved5: ::c_ulong, - } -} - -pub const __SIZEOF_PTHREAD_RWLOCK_T: usize = 56; - -cfg_if! 
{ - if #[cfg(target_arch = "aarch64")] { - mod aarch64; - pub use self::aarch64::*; - } else if #[cfg(any(target_arch = "powerpc64"))] { - mod powerpc64; - pub use self::powerpc64::*; - } else if #[cfg(any(target_arch = "x86_64"))] { - mod x86_64; - pub use self::x86_64::*; - } else { - // Unknown target_arch - } -} diff -Nru cargo-0.17.0/vendor/libc-0.2.18/src/unix/notbsd/linux/other/b64/powerpc64.rs cargo-0.19.0/vendor/libc-0.2.18/src/unix/notbsd/linux/other/b64/powerpc64.rs --- cargo-0.17.0/vendor/libc-0.2.18/src/unix/notbsd/linux/other/b64/powerpc64.rs 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/libc-0.2.18/src/unix/notbsd/linux/other/b64/powerpc64.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,176 +0,0 @@ -//! PowerPC64-specific definitions for 64-bit linux-like values - -pub type c_char = u8; -pub type wchar_t = i32; -pub type nlink_t = u64; -pub type blksize_t = i64; - -s! { - pub struct stat { - pub st_dev: ::dev_t, - pub st_ino: ::ino_t, - pub st_nlink: ::nlink_t, - pub st_mode: ::mode_t, - pub st_uid: ::uid_t, - pub st_gid: ::gid_t, - __pad0: ::c_int, - pub st_rdev: ::dev_t, - pub st_size: ::off_t, - pub st_blksize: ::blksize_t, - pub st_blocks: ::blkcnt_t, - pub st_atime: ::time_t, - pub st_atime_nsec: ::c_long, - pub st_mtime: ::time_t, - pub st_mtime_nsec: ::c_long, - pub st_ctime: ::time_t, - pub st_ctime_nsec: ::c_long, - __unused: [::c_long; 3], - } - - pub struct stat64 { - pub st_dev: ::dev_t, - pub st_ino: ::ino64_t, - pub st_nlink: ::nlink_t, - pub st_mode: ::mode_t, - pub st_uid: ::uid_t, - pub st_gid: ::gid_t, - __pad0: ::c_int, - pub st_rdev: ::dev_t, - pub st_size: ::off64_t, - pub st_blksize: ::blksize_t, - pub st_blocks: ::blkcnt64_t, - pub st_atime: ::time_t, - pub st_atime_nsec: ::c_long, - pub st_mtime: ::time_t, - pub st_mtime_nsec: ::c_long, - pub st_ctime: ::time_t, - pub st_ctime_nsec: ::c_long, - __reserved: [::c_long; 3], - } - - pub struct pthread_attr_t { - __size: [u64; 7] - } - - pub struct ipc_perm { - pub 
__key: ::key_t, - pub uid: ::uid_t, - pub gid: ::gid_t, - pub cuid: ::uid_t, - pub cgid: ::gid_t, - pub mode: ::mode_t, - pub __seq: ::uint32_t, - __pad1: ::uint32_t, - __unused1: ::uint64_t, - __unused2: ::c_ulong, - } - - pub struct shmid_ds { - pub shm_perm: ::ipc_perm, - pub shm_atime: ::time_t, - pub shm_dtime: ::time_t, - pub shm_ctime: ::time_t, - pub shm_segsz: ::size_t, - pub shm_cpid: ::pid_t, - pub shm_lpid: ::pid_t, - pub shm_nattch: ::shmatt_t, - __unused4: ::c_ulong, - __unused5: ::c_ulong - } -} - -pub const __SIZEOF_PTHREAD_CONDATTR_T: usize = 4; -pub const __SIZEOF_PTHREAD_MUTEX_T: usize = 40; -pub const __SIZEOF_PTHREAD_MUTEXATTR_T: usize = 4; - -pub const O_DIRECTORY: ::c_int = 0x4000; -pub const O_NOFOLLOW: ::c_int = 0x8000; -pub const O_DIRECT: ::c_int = 0x20000; - -pub const MAP_LOCKED: ::c_int = 0x00080; -pub const MAP_NORESERVE: ::c_int = 0x00040; - -pub const EDEADLOCK: ::c_int = 58; - -pub const SO_PEERCRED: ::c_int = 21; -pub const SO_RCVLOWAT: ::c_int = 16; -pub const SO_SNDLOWAT: ::c_int = 17; -pub const SO_RCVTIMEO: ::c_int = 18; -pub const SO_SNDTIMEO: ::c_int = 19; - -pub const FIOCLEX: ::c_ulong = 0x20006601; -pub const FIONBIO: ::c_ulong = 0x8004667e; - -pub const SYS_gettid: ::c_long = 207; -pub const SYS_perf_event_open: ::c_long = 319; - -pub const MCL_CURRENT: ::c_int = 0x2000; -pub const MCL_FUTURE: ::c_int = 0x4000; - -pub const SIGSTKSZ: ::size_t = 0x4000; -pub const CBAUD: ::tcflag_t = 0xff; -pub const TAB1: ::c_int = 0x400; -pub const TAB2: ::c_int = 0x800; -pub const TAB3: ::c_int = 0xc00; -pub const CR1: ::c_int = 0x1000; -pub const CR2: ::c_int = 0x2000; -pub const CR3: ::c_int = 0x3000; -pub const FF1: ::c_int = 0x4000; -pub const BS1: ::c_int = 0x8000; -pub const VT1: ::c_int = 0x10000; -pub const VWERASE: usize = 0xa; -pub const VREPRINT: usize = 0xb; -pub const VSUSP: usize = 0xc; -pub const VSTART: usize = 0xd; -pub const VSTOP: usize = 0xe; -pub const VDISCARD: usize = 0x10; -pub const VTIME: usize = 0x7; -pub 
const IXON: ::tcflag_t = 0x200; -pub const IXOFF: ::tcflag_t = 0x400; -pub const ONLCR: ::tcflag_t = 0x2; -pub const CSIZE: ::tcflag_t = 0x300; -pub const CS6: ::tcflag_t = 0x100; -pub const CS7: ::tcflag_t = 0x200; -pub const CS8: ::tcflag_t = 0x300; -pub const CSTOPB: ::tcflag_t = 0x400; -pub const CREAD: ::tcflag_t = 0x800; -pub const PARENB: ::tcflag_t = 0x1000; -pub const PARODD: ::tcflag_t = 0x2000; -pub const HUPCL: ::tcflag_t = 0x4000; -pub const CLOCAL: ::tcflag_t = 0x8000; -pub const ECHOKE: ::tcflag_t = 0x1; -pub const ECHOE: ::tcflag_t = 0x2; -pub const ECHOK: ::tcflag_t = 0x4; -pub const ECHONL: ::tcflag_t = 0x10; -pub const ECHOPRT: ::tcflag_t = 0x20; -pub const ECHOCTL: ::tcflag_t = 0x40; -pub const ISIG: ::tcflag_t = 0x80; -pub const ICANON: ::tcflag_t = 0x100; -pub const PENDIN: ::tcflag_t = 0x20000000; -pub const NOFLSH: ::tcflag_t = 0x80000000; - -pub const VEOL: usize = 6; -pub const VEOL2: usize = 8; -pub const VMIN: usize = 5; -pub const IEXTEN: ::tcflag_t = 0x400; -pub const TOSTOP: ::tcflag_t = 0x400000; -pub const FLUSHO: ::tcflag_t = 0x800000; -pub const EXTPROC: ::tcflag_t = 0x10000000; -pub const TCGETS: ::c_ulong = 0x403c7413; -pub const TCSETS: ::c_ulong = 0x803c7414; -pub const TCSETSW: ::c_ulong = 0x803c7415; -pub const TCSETSF: ::c_ulong = 0x803c7416; -pub const TCGETA: ::c_ulong = 0x40147417; -pub const TCSETA: ::c_ulong = 0x80147418; -pub const TCSETAW: ::c_ulong = 0x80147419; -pub const TCSETAF: ::c_ulong = 0x8014741c; -pub const TCSBRK: ::c_ulong = 0x2000741d; -pub const TCXONC: ::c_ulong = 0x2000741e; -pub const TCFLSH: ::c_ulong = 0x2000741f; -pub const TIOCINQ: ::c_ulong = 0x4004667f; -pub const TIOCGPGRP: ::c_ulong = 0x40047477; -pub const TIOCSPGRP: ::c_ulong = 0x80047476; -pub const TIOCOUTQ: ::c_ulong = 0x40047473; -pub const TIOCGWINSZ: ::c_ulong = 0x40087468; -pub const TIOCSWINSZ: ::c_ulong = 0x80087467; -pub const FIONREAD: ::c_ulong = 0x4004667f; diff -Nru 
cargo-0.17.0/vendor/libc-0.2.18/src/unix/notbsd/linux/other/b64/x86_64.rs cargo-0.19.0/vendor/libc-0.2.18/src/unix/notbsd/linux/other/b64/x86_64.rs --- cargo-0.17.0/vendor/libc-0.2.18/src/unix/notbsd/linux/other/b64/x86_64.rs 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/libc-0.2.18/src/unix/notbsd/linux/other/b64/x86_64.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,246 +0,0 @@ -//! x86_64-specific definitions for 64-bit linux-like values - -pub type c_char = i8; -pub type wchar_t = i32; -pub type nlink_t = u64; -pub type blksize_t = i64; -pub type greg_t = i64; - -s! { - pub struct stat { - pub st_dev: ::dev_t, - pub st_ino: ::ino_t, - pub st_nlink: ::nlink_t, - pub st_mode: ::mode_t, - pub st_uid: ::uid_t, - pub st_gid: ::gid_t, - __pad0: ::c_int, - pub st_rdev: ::dev_t, - pub st_size: ::off_t, - pub st_blksize: ::blksize_t, - pub st_blocks: ::blkcnt_t, - pub st_atime: ::time_t, - pub st_atime_nsec: ::c_long, - pub st_mtime: ::time_t, - pub st_mtime_nsec: ::c_long, - pub st_ctime: ::time_t, - pub st_ctime_nsec: ::c_long, - __unused: [::c_long; 3], - } - - pub struct stat64 { - pub st_dev: ::dev_t, - pub st_ino: ::ino64_t, - pub st_nlink: ::nlink_t, - pub st_mode: ::mode_t, - pub st_uid: ::uid_t, - pub st_gid: ::gid_t, - __pad0: ::c_int, - pub st_rdev: ::dev_t, - pub st_size: ::off_t, - pub st_blksize: ::blksize_t, - pub st_blocks: ::blkcnt64_t, - pub st_atime: ::time_t, - pub st_atime_nsec: ::c_long, - pub st_mtime: ::time_t, - pub st_mtime_nsec: ::c_long, - pub st_ctime: ::time_t, - pub st_ctime_nsec: ::c_long, - __reserved: [::c_long; 3], - } - - pub struct pthread_attr_t { - __size: [u64; 7] - } - - pub struct _libc_fpxreg { - pub significand: [u16; 4], - pub exponent: u16, - __private: [u16; 3], - } - - pub struct _libc_xmmreg { - pub element: [u32; 4], - } - - pub struct _libc_fpstate { - pub cwd: u16, - pub swd: u16, - pub ftw: u16, - pub fop: u16, - pub rip: u64, - pub rdp: u64, - pub mxcsr: u32, - pub mxcr_mask: u32, - pub _st: [_libc_fpxreg; 
8], - pub _xmm: [_libc_xmmreg; 16], - __private: [u64; 12], - } - - pub struct mcontext_t { - pub gregs: [greg_t; 23], - pub fpregs: *mut _libc_fpstate, - __private: [u64; 8], - } - - pub struct ucontext_t { - pub uc_flags: ::c_ulong, - pub uc_link: *mut ucontext_t, - pub uc_stack: ::stack_t, - pub uc_mcontext: mcontext_t, - pub uc_sigmask: ::sigset_t, - __private: [u8; 512], - } - - pub struct ipc_perm { - pub __key: ::key_t, - pub uid: ::uid_t, - pub gid: ::gid_t, - pub cuid: ::uid_t, - pub cgid: ::gid_t, - pub mode: ::c_ushort, - __pad1: ::c_ushort, - pub __seq: ::c_ushort, - __pad2: ::c_ushort, - __unused1: ::c_ulong, - __unused2: ::c_ulong - } - - pub struct shmid_ds { - pub shm_perm: ::ipc_perm, - pub shm_segsz: ::size_t, - pub shm_atime: ::time_t, - pub shm_dtime: ::time_t, - pub shm_ctime: ::time_t, - pub shm_cpid: ::pid_t, - pub shm_lpid: ::pid_t, - pub shm_nattch: ::shmatt_t, - __unused4: ::c_ulong, - __unused5: ::c_ulong - } -} - -pub const __SIZEOF_PTHREAD_CONDATTR_T: usize = 4; -pub const __SIZEOF_PTHREAD_MUTEX_T: usize = 40; -pub const __SIZEOF_PTHREAD_MUTEXATTR_T: usize = 4; - -pub const O_DIRECT: ::c_int = 0x4000; -pub const O_DIRECTORY: ::c_int = 0x10000; -pub const O_NOFOLLOW: ::c_int = 0x20000; - -pub const MAP_LOCKED: ::c_int = 0x02000; -pub const MAP_NORESERVE: ::c_int = 0x04000; -pub const MAP_32BIT: ::c_int = 0x0040; - -pub const EDEADLOCK: ::c_int = 35; - -pub const SO_PEERCRED: ::c_int = 17; -pub const SO_RCVLOWAT: ::c_int = 18; -pub const SO_SNDLOWAT: ::c_int = 19; -pub const SO_RCVTIMEO: ::c_int = 20; -pub const SO_SNDTIMEO: ::c_int = 21; - -pub const FIOCLEX: ::c_ulong = 0x5451; -pub const FIONBIO: ::c_ulong = 0x5421; - -pub const PTRACE_GETFPREGS: ::c_uint = 14; -pub const PTRACE_SETFPREGS: ::c_uint = 15; -pub const PTRACE_GETFPXREGS: ::c_uint = 18; -pub const PTRACE_SETFPXREGS: ::c_uint = 19; -pub const PTRACE_GETREGS: ::c_uint = 12; -pub const PTRACE_SETREGS: ::c_uint = 13; -pub const PTRACE_O_EXITKILL: ::c_uint = 1048576; -pub const 
PTRACE_O_TRACECLONE: ::c_uint = 8; -pub const PTRACE_O_TRACEEXEC: ::c_uint = 16; -pub const PTRACE_O_TRACEEXIT: ::c_uint = 64; -pub const PTRACE_O_TRACEFORK: ::c_uint = 2; -pub const PTRACE_O_TRACESYSGOOD: ::c_uint = 1; -pub const PTRACE_O_TRACEVFORK: ::c_uint = 4; -pub const PTRACE_O_TRACEVFORKDONE: ::c_uint = 32; -pub const PTRACE_O_TRACESECCOMP: ::c_uint = 128; -pub const PTRACE_O_SUSPEND_SECCOMP: ::c_uint = 2097152; -pub const PTRACE_PEEKSIGINFO_SHARED: ::c_uint = 1; - -pub const SYS_gettid: ::c_long = 186; -pub const SYS_perf_event_open: ::c_long = 298; - -pub const MCL_CURRENT: ::c_int = 0x0001; -pub const MCL_FUTURE: ::c_int = 0x0002; - -pub const SIGSTKSZ: ::size_t = 8192; -pub const CBAUD: ::tcflag_t = 0o0010017; -pub const TAB1: ::c_int = 0x00000800; -pub const TAB2: ::c_int = 0x00001000; -pub const TAB3: ::c_int = 0x00001800; -pub const CR1: ::c_int = 0x00000200; -pub const CR2: ::c_int = 0x00000400; -pub const CR3: ::c_int = 0x00000600; -pub const FF1: ::c_int = 0x00008000; -pub const BS1: ::c_int = 0x00002000; -pub const VT1: ::c_int = 0x00004000; -pub const VWERASE: usize = 14; -pub const VREPRINT: usize = 12; -pub const VSUSP: usize = 10; -pub const VSTART: usize = 8; -pub const VSTOP: usize = 9; -pub const VDISCARD: usize = 13; -pub const VTIME: usize = 5; -pub const IXON: ::tcflag_t = 0x00000400; -pub const IXOFF: ::tcflag_t = 0x00001000; -pub const ONLCR: ::tcflag_t = 0x4; -pub const CSIZE: ::tcflag_t = 0x00000030; -pub const CS6: ::tcflag_t = 0x00000010; -pub const CS7: ::tcflag_t = 0x00000020; -pub const CS8: ::tcflag_t = 0x00000030; -pub const CSTOPB: ::tcflag_t = 0x00000040; -pub const CREAD: ::tcflag_t = 0x00000080; -pub const PARENB: ::tcflag_t = 0x00000100; -pub const PARODD: ::tcflag_t = 0x00000200; -pub const HUPCL: ::tcflag_t = 0x00000400; -pub const CLOCAL: ::tcflag_t = 0x00000800; -pub const ECHOKE: ::tcflag_t = 0x00000800; -pub const ECHOE: ::tcflag_t = 0x00000010; -pub const ECHOK: ::tcflag_t = 0x00000020; -pub const ECHONL: 
::tcflag_t = 0x00000040; -pub const ECHOPRT: ::tcflag_t = 0x00000400; -pub const ECHOCTL: ::tcflag_t = 0x00000200; -pub const ISIG: ::tcflag_t = 0x00000001; -pub const ICANON: ::tcflag_t = 0x00000002; -pub const PENDIN: ::tcflag_t = 0x00004000; -pub const NOFLSH: ::tcflag_t = 0x00000080; - -pub const VEOL: usize = 11; -pub const VEOL2: usize = 16; -pub const VMIN: usize = 6; -pub const IEXTEN: ::tcflag_t = 0x00008000; -pub const TOSTOP: ::tcflag_t = 0x00000100; -pub const FLUSHO: ::tcflag_t = 0x00001000; -pub const EXTPROC: ::tcflag_t = 0x00010000; -pub const TCGETS: ::c_ulong = 0x5401; -pub const TCSETS: ::c_ulong = 0x5402; -pub const TCSETSW: ::c_ulong = 0x5403; -pub const TCSETSF: ::c_ulong = 0x5404; -pub const TCGETA: ::c_ulong = 0x5405; -pub const TCSETA: ::c_ulong = 0x5406; -pub const TCSETAW: ::c_ulong = 0x5407; -pub const TCSETAF: ::c_ulong = 0x5408; -pub const TCSBRK: ::c_ulong = 0x5409; -pub const TCXONC: ::c_ulong = 0x540A; -pub const TCFLSH: ::c_ulong = 0x540B; -pub const TIOCINQ: ::c_ulong = 0x541B; -pub const TIOCGPGRP: ::c_ulong = 0x540F; -pub const TIOCSPGRP: ::c_ulong = 0x5410; -pub const TIOCOUTQ: ::c_ulong = 0x5411; -pub const TIOCGWINSZ: ::c_ulong = 0x5413; -pub const TIOCSWINSZ: ::c_ulong = 0x5414; -pub const FIONREAD: ::c_ulong = 0x541B; - -extern { - pub fn getcontext(ucp: *mut ucontext_t) -> ::c_int; - pub fn setcontext(ucp: *const ucontext_t) -> ::c_int; - pub fn makecontext(ucp: *mut ucontext_t, - func: extern fn (), - argc: ::c_int, ...); - pub fn swapcontext(uocp: *mut ucontext_t, - ucp: *const ucontext_t) -> ::c_int; -} diff -Nru cargo-0.17.0/vendor/libc-0.2.18/src/unix/notbsd/linux/other/mod.rs cargo-0.19.0/vendor/libc-0.2.18/src/unix/notbsd/linux/other/mod.rs --- cargo-0.17.0/vendor/libc-0.2.18/src/unix/notbsd/linux/other/mod.rs 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/libc-0.2.18/src/unix/notbsd/linux/other/mod.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,642 +0,0 @@ -pub type fsblkcnt_t = ::c_ulong; -pub type 
fsfilcnt_t = ::c_ulong; -pub type rlim_t = c_ulong; -pub type __priority_which_t = ::c_uint; - -s! { - pub struct aiocb { - pub aio_fildes: ::c_int, - pub aio_lio_opcode: ::c_int, - pub aio_reqprio: ::c_int, - pub aio_buf: *mut ::c_void, - pub aio_nbytes: ::size_t, - pub aio_sigevent: ::sigevent, - __next_prio: *mut aiocb, - __abs_prio: ::c_int, - __policy: ::c_int, - __error_code: ::c_int, - __return_value: ::ssize_t, - pub aio_offset: off_t, - #[cfg(target_pointer_width = "32")] - __unused1: [::c_char; 4], - __glibc_reserved: [::c_char; 32] - } - - pub struct __exit_status { - pub e_termination: ::c_short, - pub e_exit: ::c_short, - } - - pub struct __timeval { - pub tv_sec: ::int32_t, - pub tv_usec: ::int32_t, - } - - pub struct utmpx { - pub ut_type: ::c_short, - pub ut_pid: ::pid_t, - pub ut_line: [::c_char; __UT_LINESIZE], - pub ut_id: [::c_char; 4], - - pub ut_user: [::c_char; __UT_NAMESIZE], - pub ut_host: [::c_char; __UT_HOSTSIZE], - pub ut_exit: __exit_status, - - #[cfg(any(target_arch = "aarch64", target_pointer_width = "32"))] - pub ut_session: ::c_long, - #[cfg(any(target_arch = "aarch64", target_pointer_width = "32"))] - pub ut_tv: ::timeval, - - #[cfg(not(any(target_arch = "aarch64", target_pointer_width = "32")))] - pub ut_session: ::int32_t, - #[cfg(not(any(target_arch = "aarch64", target_pointer_width = "32")))] - pub ut_tv: __timeval, - - pub ut_addr_v6: [::int32_t; 4], - __glibc_reserved: [::c_char; 20], - } - - pub struct sigaction { - pub sa_sigaction: ::sighandler_t, - pub sa_mask: ::sigset_t, - pub sa_flags: ::c_int, - _restorer: *mut ::c_void, - } - - pub struct stack_t { - pub ss_sp: *mut ::c_void, - pub ss_flags: ::c_int, - pub ss_size: ::size_t - } - - pub struct siginfo_t { - pub si_signo: ::c_int, - pub si_errno: ::c_int, - pub si_code: ::c_int, - pub _pad: [::c_int; 29], - _align: [usize; 0], - } - - pub struct glob64_t { - pub gl_pathc: ::size_t, - pub gl_pathv: *mut *mut ::c_char, - pub gl_offs: ::size_t, - pub gl_flags: ::c_int, - 
- __unused1: *mut ::c_void, - __unused2: *mut ::c_void, - __unused3: *mut ::c_void, - __unused4: *mut ::c_void, - __unused5: *mut ::c_void, - } - - pub struct ucred { - pub pid: ::pid_t, - pub uid: ::uid_t, - pub gid: ::gid_t, - } - - pub struct statfs { - pub f_type: __fsword_t, - pub f_bsize: __fsword_t, - pub f_blocks: ::fsblkcnt_t, - pub f_bfree: ::fsblkcnt_t, - pub f_bavail: ::fsblkcnt_t, - - pub f_files: ::fsfilcnt_t, - pub f_ffree: ::fsfilcnt_t, - pub f_fsid: ::fsid_t, - - pub f_namelen: __fsword_t, - pub f_frsize: __fsword_t, - f_spare: [__fsword_t; 5], - } - - pub struct msghdr { - pub msg_name: *mut ::c_void, - pub msg_namelen: ::socklen_t, - pub msg_iov: *mut ::iovec, - pub msg_iovlen: ::size_t, - pub msg_control: *mut ::c_void, - pub msg_controllen: ::size_t, - pub msg_flags: ::c_int, - } - - pub struct termios { - pub c_iflag: ::tcflag_t, - pub c_oflag: ::tcflag_t, - pub c_cflag: ::tcflag_t, - pub c_lflag: ::tcflag_t, - pub c_line: ::cc_t, - pub c_cc: [::cc_t; ::NCCS], - pub c_ispeed: ::speed_t, - pub c_ospeed: ::speed_t, - } - - pub struct flock { - pub l_type: ::c_short, - pub l_whence: ::c_short, - pub l_start: ::off_t, - pub l_len: ::off_t, - pub l_pid: ::pid_t, - } - - // FIXME this is actually a union - pub struct sem_t { - #[cfg(target_pointer_width = "32")] - __size: [::c_char; 16], - #[cfg(target_pointer_width = "64")] - __size: [::c_char; 32], - __align: [::c_long; 0], - } -} - -pub const __UT_LINESIZE: usize = 32; -pub const __UT_NAMESIZE: usize = 32; -pub const __UT_HOSTSIZE: usize = 256; -pub const EMPTY: ::c_short = 0; -pub const RUN_LVL: ::c_short = 1; -pub const BOOT_TIME: ::c_short = 2; -pub const NEW_TIME: ::c_short = 3; -pub const OLD_TIME: ::c_short = 4; -pub const INIT_PROCESS: ::c_short = 5; -pub const LOGIN_PROCESS: ::c_short = 6; -pub const USER_PROCESS: ::c_short = 7; -pub const DEAD_PROCESS: ::c_short = 8; -pub const ACCOUNTING: ::c_short = 9; - -pub const RLIMIT_RSS: ::c_int = 5; -pub const RLIMIT_NOFILE: ::c_int = 7; -pub 
const RLIMIT_AS: ::c_int = 9; -pub const RLIMIT_NPROC: ::c_int = 6; -pub const RLIMIT_MEMLOCK: ::c_int = 8; -pub const RLIM_INFINITY: ::rlim_t = !0; -pub const RLIMIT_RTTIME: ::c_int = 15; -pub const RLIMIT_NLIMITS: ::c_int = 16; - -pub const O_APPEND: ::c_int = 1024; -pub const O_CREAT: ::c_int = 64; -pub const O_EXCL: ::c_int = 128; -pub const O_NOCTTY: ::c_int = 256; -pub const O_NONBLOCK: ::c_int = 2048; -pub const O_SYNC: ::c_int = 1052672; -pub const O_RSYNC: ::c_int = 1052672; -pub const O_DSYNC: ::c_int = 4096; -pub const O_FSYNC: ::c_int = 0x101000; - -pub const SOCK_NONBLOCK: ::c_int = O_NONBLOCK; - -pub const LC_PAPER: ::c_int = 7; -pub const LC_NAME: ::c_int = 8; -pub const LC_ADDRESS: ::c_int = 9; -pub const LC_TELEPHONE: ::c_int = 10; -pub const LC_MEASUREMENT: ::c_int = 11; -pub const LC_IDENTIFICATION: ::c_int = 12; -pub const LC_PAPER_MASK: ::c_int = (1 << LC_PAPER); -pub const LC_NAME_MASK: ::c_int = (1 << LC_NAME); -pub const LC_ADDRESS_MASK: ::c_int = (1 << LC_ADDRESS); -pub const LC_TELEPHONE_MASK: ::c_int = (1 << LC_TELEPHONE); -pub const LC_MEASUREMENT_MASK: ::c_int = (1 << LC_MEASUREMENT); -pub const LC_IDENTIFICATION_MASK: ::c_int = (1 << LC_IDENTIFICATION); -pub const LC_ALL_MASK: ::c_int = ::LC_CTYPE_MASK - | ::LC_NUMERIC_MASK - | ::LC_TIME_MASK - | ::LC_COLLATE_MASK - | ::LC_MONETARY_MASK - | ::LC_MESSAGES_MASK - | LC_PAPER_MASK - | LC_NAME_MASK - | LC_ADDRESS_MASK - | LC_TELEPHONE_MASK - | LC_MEASUREMENT_MASK - | LC_IDENTIFICATION_MASK; - -pub const MAP_ANON: ::c_int = 0x0020; -pub const MAP_ANONYMOUS: ::c_int = 0x0020; -pub const MAP_GROWSDOWN: ::c_int = 0x0100; -pub const MAP_DENYWRITE: ::c_int = 0x0800; -pub const MAP_EXECUTABLE: ::c_int = 0x01000; -pub const MAP_POPULATE: ::c_int = 0x08000; -pub const MAP_NONBLOCK: ::c_int = 0x010000; -pub const MAP_STACK: ::c_int = 0x020000; - -pub const EDEADLK: ::c_int = 35; -pub const ENAMETOOLONG: ::c_int = 36; -pub const ENOLCK: ::c_int = 37; -pub const ENOSYS: ::c_int = 38; -pub const 
ENOTEMPTY: ::c_int = 39; -pub const ELOOP: ::c_int = 40; -pub const ENOMSG: ::c_int = 42; -pub const EIDRM: ::c_int = 43; -pub const ECHRNG: ::c_int = 44; -pub const EL2NSYNC: ::c_int = 45; -pub const EL3HLT: ::c_int = 46; -pub const EL3RST: ::c_int = 47; -pub const ELNRNG: ::c_int = 48; -pub const EUNATCH: ::c_int = 49; -pub const ENOCSI: ::c_int = 50; -pub const EL2HLT: ::c_int = 51; -pub const EBADE: ::c_int = 52; -pub const EBADR: ::c_int = 53; -pub const EXFULL: ::c_int = 54; -pub const ENOANO: ::c_int = 55; -pub const EBADRQC: ::c_int = 56; -pub const EBADSLT: ::c_int = 57; -pub const EMULTIHOP: ::c_int = 72; -pub const EOVERFLOW: ::c_int = 75; -pub const ENOTUNIQ: ::c_int = 76; -pub const EBADFD: ::c_int = 77; -pub const EBADMSG: ::c_int = 74; -pub const EREMCHG: ::c_int = 78; -pub const ELIBACC: ::c_int = 79; -pub const ELIBBAD: ::c_int = 80; -pub const ELIBSCN: ::c_int = 81; -pub const ELIBMAX: ::c_int = 82; -pub const ELIBEXEC: ::c_int = 83; -pub const EILSEQ: ::c_int = 84; -pub const ERESTART: ::c_int = 85; -pub const ESTRPIPE: ::c_int = 86; -pub const EUSERS: ::c_int = 87; -pub const ENOTSOCK: ::c_int = 88; -pub const EDESTADDRREQ: ::c_int = 89; -pub const EMSGSIZE: ::c_int = 90; -pub const EPROTOTYPE: ::c_int = 91; -pub const ENOPROTOOPT: ::c_int = 92; -pub const EPROTONOSUPPORT: ::c_int = 93; -pub const ESOCKTNOSUPPORT: ::c_int = 94; -pub const EOPNOTSUPP: ::c_int = 95; -pub const ENOTSUP: ::c_int = EOPNOTSUPP; -pub const EPFNOSUPPORT: ::c_int = 96; -pub const EAFNOSUPPORT: ::c_int = 97; -pub const EADDRINUSE: ::c_int = 98; -pub const EADDRNOTAVAIL: ::c_int = 99; -pub const ENETDOWN: ::c_int = 100; -pub const ENETUNREACH: ::c_int = 101; -pub const ENETRESET: ::c_int = 102; -pub const ECONNABORTED: ::c_int = 103; -pub const ECONNRESET: ::c_int = 104; -pub const ENOBUFS: ::c_int = 105; -pub const EISCONN: ::c_int = 106; -pub const ENOTCONN: ::c_int = 107; -pub const ESHUTDOWN: ::c_int = 108; -pub const ETOOMANYREFS: ::c_int = 109; -pub const ETIMEDOUT: 
::c_int = 110; -pub const ECONNREFUSED: ::c_int = 111; -pub const EHOSTDOWN: ::c_int = 112; -pub const EHOSTUNREACH: ::c_int = 113; -pub const EALREADY: ::c_int = 114; -pub const EINPROGRESS: ::c_int = 115; -pub const ESTALE: ::c_int = 116; -pub const EUCLEAN: ::c_int = 117; -pub const ENOTNAM: ::c_int = 118; -pub const ENAVAIL: ::c_int = 119; -pub const EISNAM: ::c_int = 120; -pub const EREMOTEIO: ::c_int = 121; -pub const EDQUOT: ::c_int = 122; -pub const ENOMEDIUM: ::c_int = 123; -pub const EMEDIUMTYPE: ::c_int = 124; -pub const ECANCELED: ::c_int = 125; -pub const ENOKEY: ::c_int = 126; -pub const EKEYEXPIRED: ::c_int = 127; -pub const EKEYREVOKED: ::c_int = 128; -pub const EKEYREJECTED: ::c_int = 129; -pub const EOWNERDEAD: ::c_int = 130; -pub const ENOTRECOVERABLE: ::c_int = 131; -pub const EHWPOISON: ::c_int = 133; -pub const ERFKILL: ::c_int = 132; - -pub const SOCK_STREAM: ::c_int = 1; -pub const SOCK_DGRAM: ::c_int = 2; -pub const SOCK_SEQPACKET: ::c_int = 5; - -pub const SOL_SOCKET: ::c_int = 1; - -pub const SO_REUSEADDR: ::c_int = 2; -pub const SO_TYPE: ::c_int = 3; -pub const SO_ERROR: ::c_int = 4; -pub const SO_DONTROUTE: ::c_int = 5; -pub const SO_BROADCAST: ::c_int = 6; -pub const SO_SNDBUF: ::c_int = 7; -pub const SO_RCVBUF: ::c_int = 8; -pub const SO_KEEPALIVE: ::c_int = 9; -pub const SO_OOBINLINE: ::c_int = 10; -pub const SO_LINGER: ::c_int = 13; -pub const SO_REUSEPORT: ::c_int = 15; -pub const SO_ACCEPTCONN: ::c_int = 30; - -pub const TCP_COOKIE_TRANSACTIONS: ::c_int = 15; -pub const TCP_THIN_LINEAR_TIMEOUTS: ::c_int = 16; -pub const TCP_THIN_DUPACK: ::c_int = 17; -pub const TCP_USER_TIMEOUT: ::c_int = 18; -pub const TCP_REPAIR: ::c_int = 19; -pub const TCP_REPAIR_QUEUE: ::c_int = 20; -pub const TCP_QUEUE_SEQ: ::c_int = 21; -pub const TCP_REPAIR_OPTIONS: ::c_int = 22; -pub const TCP_FASTOPEN: ::c_int = 23; -pub const TCP_TIMESTAMP: ::c_int = 24; - -pub const SA_ONSTACK: ::c_int = 0x08000000; -pub const SA_SIGINFO: ::c_int = 0x00000004; -pub 
const SA_NOCLDWAIT: ::c_int = 0x00000002; - -pub const SIGCHLD: ::c_int = 17; -pub const SIGBUS: ::c_int = 7; -pub const SIGUSR1: ::c_int = 10; -pub const SIGUSR2: ::c_int = 12; -pub const SIGCONT: ::c_int = 18; -pub const SIGSTOP: ::c_int = 19; -pub const SIGTSTP: ::c_int = 20; -pub const SIGURG: ::c_int = 23; -pub const SIGIO: ::c_int = 29; -pub const SIGSYS: ::c_int = 31; -pub const SIGSTKFLT: ::c_int = 16; -pub const SIGUNUSED: ::c_int = 31; -pub const SIGTTIN: ::c_int = 21; -pub const SIGTTOU: ::c_int = 22; -pub const SIGXCPU: ::c_int = 24; -pub const SIGXFSZ: ::c_int = 25; -pub const SIGVTALRM: ::c_int = 26; -pub const SIGPROF: ::c_int = 27; -pub const SIGWINCH: ::c_int = 28; -pub const SIGPOLL: ::c_int = 29; -pub const SIGPWR: ::c_int = 30; -pub const SIG_SETMASK: ::c_int = 2; -pub const SIG_BLOCK: ::c_int = 0x000000; -pub const SIG_UNBLOCK: ::c_int = 0x01; - -pub const SIGEV_THREAD_ID: ::c_int = 4; - -pub const POLLRDNORM: ::c_short = 0x040; -pub const POLLWRNORM: ::c_short = 0x100; -pub const POLLRDBAND: ::c_short = 0x080; -pub const POLLWRBAND: ::c_short = 0x200; - -pub const FALLOC_FL_KEEP_SIZE: ::c_int = 0x01; -pub const FALLOC_FL_PUNCH_HOLE: ::c_int = 0x02; - -pub const BUFSIZ: ::c_uint = 8192; -pub const TMP_MAX: ::c_uint = 238328; -pub const FOPEN_MAX: ::c_uint = 16; -pub const POSIX_FADV_DONTNEED: ::c_int = 4; -pub const POSIX_FADV_NOREUSE: ::c_int = 5; -pub const POSIX_MADV_DONTNEED: ::c_int = 4; -pub const _SC_2_C_VERSION: ::c_int = 96; -pub const O_ACCMODE: ::c_int = 3; -pub const O_ASYNC: ::c_int = 0x2000; -pub const O_NDELAY: ::c_int = 0x800; -pub const ST_RELATIME: ::c_ulong = 4096; -pub const NI_MAXHOST: ::socklen_t = 1025; - -pub const ADFS_SUPER_MAGIC: ::c_long = 0x0000adf5; -pub const AFFS_SUPER_MAGIC: ::c_long = 0x0000adff; -pub const CODA_SUPER_MAGIC: ::c_long = 0x73757245; -pub const CRAMFS_MAGIC: ::c_long = 0x28cd3d45; -pub const EFS_SUPER_MAGIC: ::c_long = 0x00414a53; -pub const EXT2_SUPER_MAGIC: ::c_long = 0x0000ef53; -pub const 
EXT3_SUPER_MAGIC: ::c_long = 0x0000ef53; -pub const EXT4_SUPER_MAGIC: ::c_long = 0x0000ef53; -pub const HPFS_SUPER_MAGIC: ::c_long = 0xf995e849; -pub const HUGETLBFS_MAGIC: ::c_long = 0x958458f6; -pub const ISOFS_SUPER_MAGIC: ::c_long = 0x00009660; -pub const JFFS2_SUPER_MAGIC: ::c_long = 0x000072b6; -pub const MINIX_SUPER_MAGIC: ::c_long = 0x0000137f; -pub const MINIX_SUPER_MAGIC2: ::c_long = 0x0000138f; -pub const MINIX2_SUPER_MAGIC: ::c_long = 0x00002468; -pub const MINIX2_SUPER_MAGIC2: ::c_long = 0x00002478; -pub const MSDOS_SUPER_MAGIC: ::c_long = 0x00004d44; -pub const NCP_SUPER_MAGIC: ::c_long = 0x0000564c; -pub const NFS_SUPER_MAGIC: ::c_long = 0x00006969; -pub const OPENPROM_SUPER_MAGIC: ::c_long = 0x00009fa1; -pub const PROC_SUPER_MAGIC: ::c_long = 0x00009fa0; -pub const QNX4_SUPER_MAGIC: ::c_long = 0x0000002f; -pub const REISERFS_SUPER_MAGIC: ::c_long = 0x52654973; -pub const SMB_SUPER_MAGIC: ::c_long = 0x0000517b; -pub const TMPFS_MAGIC: ::c_long = 0x01021994; -pub const USBDEVICE_SUPER_MAGIC: ::c_long = 0x00009fa2; - -pub const VEOF: usize = 4; -pub const IUTF8: ::tcflag_t = 0x00004000; - -pub const CPU_SETSIZE: ::c_int = 0x400; - -pub const QFMT_VFS_V1: ::c_int = 4; - -pub const PTRACE_TRACEME: ::c_uint = 0; -pub const PTRACE_PEEKTEXT: ::c_uint = 1; -pub const PTRACE_PEEKDATA: ::c_uint = 2; -pub const PTRACE_PEEKUSER: ::c_uint = 3; -pub const PTRACE_POKETEXT: ::c_uint = 4; -pub const PTRACE_POKEDATA: ::c_uint = 5; -pub const PTRACE_POKEUSER: ::c_uint = 6; -pub const PTRACE_CONT: ::c_uint = 7; -pub const PTRACE_KILL: ::c_uint = 8; -pub const PTRACE_SINGLESTEP: ::c_uint = 9; -pub const PTRACE_ATTACH: ::c_uint = 16; -pub const PTRACE_DETACH: ::c_uint = 17; -pub const PTRACE_SYSCALL: ::c_uint = 24; -pub const PTRACE_SETOPTIONS: ::c_uint = 0x4200; -pub const PTRACE_GETEVENTMSG: ::c_uint = 0x4201; -pub const PTRACE_GETSIGINFO: ::c_uint = 0x4202; -pub const PTRACE_SETSIGINFO: ::c_uint = 0x4203; -pub const PTRACE_GETREGSET: ::c_uint = 0x4204; -pub const 
PTRACE_SETREGSET: ::c_uint = 0x4205; -pub const PTRACE_SEIZE: ::c_uint = 0x4206; -pub const PTRACE_INTERRUPT: ::c_uint = 0x4207; -pub const PTRACE_LISTEN: ::c_uint = 0x4208; -pub const PTRACE_PEEKSIGINFO: ::c_uint = 0x4209; - -pub const MADV_DODUMP: ::c_int = 17; -pub const MADV_DONTDUMP: ::c_int = 16; - -pub const EPOLLWAKEUP: ::c_int = 0x20000000; - -pub const MADV_HUGEPAGE: ::c_int = 14; -pub const MADV_NOHUGEPAGE: ::c_int = 15; -pub const MAP_HUGETLB: ::c_int = 0x040000; - -pub const EFD_NONBLOCK: ::c_int = 0x800; - -pub const F_GETLK: ::c_int = 5; -pub const F_GETOWN: ::c_int = 9; -pub const F_SETOWN: ::c_int = 8; -pub const F_SETLK: ::c_int = 6; -pub const F_SETLKW: ::c_int = 7; - -pub const SEEK_DATA: ::c_int = 3; -pub const SEEK_HOLE: ::c_int = 4; - -pub const SFD_NONBLOCK: ::c_int = 0x0800; - -pub const TCSANOW: ::c_int = 0; -pub const TCSADRAIN: ::c_int = 1; -pub const TCSAFLUSH: ::c_int = 2; - -pub const TIOCGSOFTCAR: ::c_ulong = 0x5419; -pub const TIOCSSOFTCAR: ::c_ulong = 0x541A; -pub const TIOCLINUX: ::c_ulong = 0x541C; -pub const TIOCGSERIAL: ::c_ulong = 0x541E; -pub const TIOCEXCL: ::c_ulong = 0x540C; -pub const TIOCNXCL: ::c_ulong = 0x540D; -pub const TIOCSCTTY: ::c_ulong = 0x540E; -pub const TIOCSTI: ::c_ulong = 0x5412; -pub const TIOCMGET: ::c_ulong = 0x5415; -pub const TIOCMBIS: ::c_ulong = 0x5416; -pub const TIOCMBIC: ::c_ulong = 0x5417; -pub const TIOCMSET: ::c_ulong = 0x5418; -pub const TIOCCONS: ::c_ulong = 0x541D; - -pub const RTLD_DEEPBIND: ::c_int = 0x8; -pub const RTLD_GLOBAL: ::c_int = 0x100; -pub const RTLD_NOLOAD: ::c_int = 0x4; - -pub const LINUX_REBOOT_MAGIC1: ::c_int = 0xfee1dead; -pub const LINUX_REBOOT_MAGIC2: ::c_int = 672274793; -pub const LINUX_REBOOT_MAGIC2A: ::c_int = 85072278; -pub const LINUX_REBOOT_MAGIC2B: ::c_int = 369367448; -pub const LINUX_REBOOT_MAGIC2C: ::c_int = 537993216; - -pub const LINUX_REBOOT_CMD_RESTART: ::c_int = 0x01234567; -pub const LINUX_REBOOT_CMD_HALT: ::c_int = 0xCDEF0123; -pub const 
LINUX_REBOOT_CMD_CAD_ON: ::c_int = 0x89ABCDEF; -pub const LINUX_REBOOT_CMD_CAD_OFF: ::c_int = 0x00000000; -pub const LINUX_REBOOT_CMD_POWER_OFF: ::c_int = 0x4321FEDC; -pub const LINUX_REBOOT_CMD_RESTART2: ::c_int = 0xA1B2C3D4; -pub const LINUX_REBOOT_CMD_SW_SUSPEND: ::c_int = 0xD000FCE2; -pub const LINUX_REBOOT_CMD_KEXEC: ::c_int = 0x45584543; - -pub const NETLINK_ROUTE: ::c_int = 0; -pub const NETLINK_UNUSED: ::c_int = 1; -pub const NETLINK_USERSOCK: ::c_int = 2; -pub const NETLINK_FIREWALL: ::c_int = 3; -pub const NETLINK_SOCK_DIAG: ::c_int = 4; -pub const NETLINK_NFLOG: ::c_int = 5; -pub const NETLINK_XFRM: ::c_int = 6; -pub const NETLINK_SELINUX: ::c_int = 7; -pub const NETLINK_ISCSI: ::c_int = 8; -pub const NETLINK_AUDIT: ::c_int = 9; -pub const NETLINK_FIB_LOOKUP: ::c_int = 10; -pub const NETLINK_CONNECTOR: ::c_int = 11; -pub const NETLINK_NETFILTER: ::c_int = 12; -pub const NETLINK_IP6_FW: ::c_int = 13; -pub const NETLINK_DNRTMSG: ::c_int = 14; -pub const NETLINK_KOBJECT_UEVENT: ::c_int = 15; -pub const NETLINK_GENERIC: ::c_int = 16; -pub const NETLINK_SCSITRANSPORT: ::c_int = 18; -pub const NETLINK_ECRYPTFS: ::c_int = 19; -pub const NETLINK_RDMA: ::c_int = 20; -pub const NETLINK_CRYPTO: ::c_int = 21; -pub const NETLINK_INET_DIAG: ::c_int = NETLINK_SOCK_DIAG; - -pub const MAX_LINKS: ::c_int = 32; - -pub const NLM_F_REQUEST: ::c_int = 1; -pub const NLM_F_MULTI: ::c_int = 2; -pub const NLM_F_ACK: ::c_int = 4; -pub const NLM_F_ECHO: ::c_int = 8; -pub const NLM_F_DUMP_INTR: ::c_int = 16; -pub const NLM_F_DUMP_FILTERED: ::c_int = 32; - -pub const NLM_F_ROOT: ::c_int = 0x100; -pub const NLM_F_MATCH: ::c_int = 0x200; -pub const NLM_F_ATOMIC: ::c_int = 0x400; -pub const NLM_F_DUMP: ::c_int = NLM_F_ROOT | NLM_F_MATCH; - -pub const NLM_F_REPLACE: ::c_int = 0x100; -pub const NLM_F_EXCL: ::c_int = 0x200; -pub const NLM_F_CREATE: ::c_int = 0x400; -pub const NLM_F_APPEND: ::c_int = 0x800; - -pub const NLMSG_NOOP: ::c_int = 0x1; -pub const NLMSG_ERROR: ::c_int = 0x2; -pub 
const NLMSG_DONE: ::c_int = 0x3; -pub const NLMSG_OVERRUN: ::c_int = 0x4; -pub const NLMSG_MIN_TYPE: ::c_int = 0x10; - -pub const NETLINK_ADD_MEMBERSHIP: ::c_int = 1; -pub const NETLINK_DROP_MEMBERSHIP: ::c_int = 2; -pub const NETLINK_PKTINFO: ::c_int = 3; -pub const NETLINK_BROADCAST_ERROR: ::c_int = 4; -pub const NETLINK_NO_ENOBUFS: ::c_int = 5; -pub const NETLINK_RX_RING: ::c_int = 6; -pub const NETLINK_TX_RING: ::c_int = 7; -pub const NETLINK_LISTEN_ALL_NSID: ::c_int = 8; -pub const NETLINK_LIST_MEMBERSHIPS: ::c_int = 9; -pub const NETLINK_CAP_ACK: ::c_int = 10; - -pub const NLA_F_NESTED: ::c_int = 1 << 15; -pub const NLA_F_NET_BYTEORDER: ::c_int = 1 << 14; -pub const NLA_TYPE_MASK: ::c_int = !(NLA_F_NESTED | NLA_F_NET_BYTEORDER); - -cfg_if! { - if #[cfg(any(target_arch = "arm", target_arch = "x86", - target_arch = "x86_64"))] { - pub const PTHREAD_STACK_MIN: ::size_t = 16384; - } else { - pub const PTHREAD_STACK_MIN: ::size_t = 131072; - } -} - -extern { - pub fn utmpxname(file: *const ::c_char) -> ::c_int; - pub fn getutxent() -> *mut utmpx; - pub fn getutxid(ut: *const utmpx) -> *mut utmpx; - pub fn getutxline(ut: *const utmpx) -> *mut utmpx; - pub fn pututxline(ut: *const utmpx) -> *mut utmpx; - pub fn setutxent(); - pub fn endutxent(); - pub fn getpt() -> ::c_int; -} - -#[link(name = "util")] -extern { - pub fn sysctl(name: *mut ::c_int, - namelen: ::c_int, - oldp: *mut ::c_void, - oldlenp: *mut ::size_t, - newp: *mut ::c_void, - newlen: ::size_t) - -> ::c_int; - pub fn ioctl(fd: ::c_int, request: ::c_ulong, ...) -> ::c_int; - pub fn backtrace(buf: *mut *mut ::c_void, - sz: ::c_int) -> ::c_int; - pub fn glob64(pattern: *const ::c_char, - flags: ::c_int, - errfunc: ::dox::Option ::c_int>, - pglob: *mut glob64_t) -> ::c_int; - pub fn globfree64(pglob: *mut glob64_t); - pub fn ptrace(request: ::c_uint, ...) 
-> ::c_long; - pub fn pthread_attr_getaffinity_np(attr: *const ::pthread_attr_t, - cpusetsize: ::size_t, - cpuset: *mut ::cpu_set_t) -> ::c_int; - pub fn pthread_attr_setaffinity_np(attr: *mut ::pthread_attr_t, - cpusetsize: ::size_t, - cpuset: *const ::cpu_set_t) -> ::c_int; - pub fn getpriority(which: ::__priority_which_t, who: ::id_t) -> ::c_int; - pub fn setpriority(which: ::__priority_which_t, who: ::id_t, - prio: ::c_int) -> ::c_int; - pub fn pthread_getaffinity_np(thread: ::pthread_t, - cpusetsize: ::size_t, - cpuset: *mut ::cpu_set_t) -> ::c_int; - pub fn pthread_setaffinity_np(thread: ::pthread_t, - cpusetsize: ::size_t, - cpuset: *const ::cpu_set_t) -> ::c_int; - pub fn sched_getcpu() -> ::c_int; -} - -cfg_if! { - if #[cfg(any(target_arch = "x86", - target_arch = "arm", - target_arch = "powerpc"))] { - mod b32; - pub use self::b32::*; - } else if #[cfg(any(target_arch = "x86_64", - target_arch = "aarch64", - target_arch = "powerpc64"))] { - mod b64; - pub use self::b64::*; - } else { - // Unknown target_arch - } -} diff -Nru cargo-0.17.0/vendor/libc-0.2.18/src/unix/notbsd/linux/s390x.rs cargo-0.19.0/vendor/libc-0.2.18/src/unix/notbsd/linux/s390x.rs --- cargo-0.17.0/vendor/libc-0.2.18/src/unix/notbsd/linux/s390x.rs 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/libc-0.2.18/src/unix/notbsd/linux/s390x.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,698 +0,0 @@ -pub type blkcnt_t = i64; -pub type blksize_t = i64; -pub type c_char = u8; -pub type c_long = i64; -pub type c_ulong = u64; -pub type fsblkcnt_t = u64; -pub type fsfilcnt_t = u64; -pub type ino_t = u64; -pub type nlink_t = u64; -pub type off_t = i64; -pub type rlim_t = u64; -pub type suseconds_t = i64; -pub type time_t = i64; -pub type wchar_t = i32; -pub type greg_t = u64; -pub type clock_t = i64; -pub type __fsword_t = ::c_long; -pub type __priority_which_t = ::c_uint; - -s! 
{ - pub struct aiocb { - pub aio_fildes: ::c_int, - pub aio_lio_opcode: ::c_int, - pub aio_reqprio: ::c_int, - pub aio_buf: *mut ::c_void, - pub aio_nbytes: ::size_t, - pub aio_sigevent: ::sigevent, - __next_prio: *mut aiocb, - __abs_prio: ::c_int, - __policy: ::c_int, - __error_code: ::c_int, - __return_value: ::ssize_t, - pub aio_offset: off_t, - #[cfg(target_pointer_width = "32")] - __unused1: [::c_char; 4], - __glibc_reserved: [::c_char; 32] - } - - pub struct stat { - pub st_dev: ::dev_t, - pub st_ino: ::ino_t, - pub st_nlink: ::nlink_t, - pub st_mode: ::mode_t, - pub st_uid: ::uid_t, - pub st_gid: ::gid_t, - st_pad0: ::c_int, - pub st_rdev: ::dev_t, - pub st_size: ::off_t, - pub st_atime: ::time_t, - pub st_atime_nsec: ::c_long, - pub st_mtime: ::time_t, - pub st_mtime_nsec: ::c_long, - pub st_ctime: ::time_t, - pub st_ctime_nsec: ::c_long, - pub st_blksize: ::blksize_t, - pub st_blocks: ::blkcnt_t, - __glibc_reserved: [::c_long; 3], - } - - pub struct stat64 { - pub st_dev: ::dev_t, - pub st_ino: ::ino64_t, - pub st_nlink: ::nlink_t, - pub st_mode: ::mode_t, - pub st_uid: ::uid_t, - pub st_gid: ::gid_t, - st_pad0: ::c_int, - pub st_rdev: ::dev_t, - pub st_size: ::off_t, - pub st_atime: ::time_t, - pub st_atime_nsec: ::c_long, - pub st_mtime: ::time_t, - pub st_mtime_nsec: ::c_long, - pub st_ctime: ::time_t, - pub st_ctime_nsec: ::c_long, - pub st_blksize: ::blksize_t, - pub st_blocks: ::blkcnt64_t, - __glibc_reserved: [::c_long; 3], - } - - pub struct pthread_attr_t { - __size: [::c_ulong; 7] - } - - pub struct sigaction { - pub sa_sigaction: ::sighandler_t, - __glibc_reserved0: ::c_int, - pub sa_flags: ::c_int, - _restorer: *mut ::c_void, - pub sa_mask: sigset_t, - } - - pub struct stack_t { - pub ss_sp: *mut ::c_void, - pub ss_flags: ::c_int, - pub ss_size: ::size_t, - } - - pub struct sigset_t { - __size: [::c_ulong; 16], - } - - pub struct siginfo_t { - pub si_signo: ::c_int, - pub si_errno: ::c_int, - pub si_code: ::c_int, - _pad: ::c_int, - _pad2: 
[::c_long; 14], - } - - pub struct ipc_perm { - pub __key: ::key_t, - pub uid: ::uid_t, - pub gid: ::gid_t, - pub cuid: ::uid_t, - pub cgid: ::gid_t, - pub mode: ::mode_t, - pub __seq: ::c_ushort, - __pad1: ::c_ushort, - __unused1: ::c_ulong, - __unused2: ::c_ulong - } - - pub struct shmid_ds { - pub shm_perm: ::ipc_perm, - pub shm_segsz: ::size_t, - pub shm_atime: ::time_t, - pub shm_dtime: ::time_t, - pub shm_ctime: ::time_t, - pub shm_cpid: ::pid_t, - pub shm_lpid: ::pid_t, - pub shm_nattch: ::shmatt_t, - __unused4: ::c_ulong, - __unused5: ::c_ulong - } - - pub struct statfs { - pub f_type: ::c_uint, - pub f_bsize: ::c_uint, - pub f_blocks: ::fsblkcnt_t, - pub f_bfree: ::fsblkcnt_t, - pub f_bavail: ::fsblkcnt_t, - pub f_files: ::fsfilcnt_t, - pub f_ffree: ::fsfilcnt_t, - pub f_fsid: ::fsid_t, - pub f_namelen: ::c_uint, - pub f_frsize: ::c_uint, - pub f_flags: ::c_uint, - f_spare: [::c_uint; 4], - } - - pub struct msghdr { - pub msg_name: *mut ::c_void, - pub msg_namelen: ::socklen_t, - pub msg_iov: *mut ::iovec, - pub msg_iovlen: ::size_t, - pub msg_control: *mut ::c_void, - pub msg_controllen: ::size_t, - pub msg_flags: ::c_int, - } - - pub struct termios { - pub c_iflag: ::tcflag_t, - pub c_oflag: ::tcflag_t, - pub c_cflag: ::tcflag_t, - pub c_lflag: ::tcflag_t, - pub c_line: ::cc_t, - pub c_cc: [::cc_t; ::NCCS], - pub c_ispeed: ::speed_t, - pub c_ospeed: ::speed_t, - } - - pub struct sysinfo { - pub uptime: ::c_long, - pub loads: [::c_ulong; 3], - pub totalram: ::c_ulong, - pub freeram: ::c_ulong, - pub sharedram: ::c_ulong, - pub bufferram: ::c_ulong, - pub totalswap: ::c_ulong, - pub freeswap: ::c_ulong, - pub procs: ::c_ushort, - pub pad: ::c_ushort, - pub totalhigh: ::c_ulong, - pub freehigh: ::c_ulong, - pub mem_unit: ::c_uint, - pub _f: [::c_char; 0], - } - - pub struct glob64_t { - pub gl_pathc: ::size_t, - pub gl_pathv: *mut *mut ::c_char, - pub gl_offs: ::size_t, - pub gl_flags: ::c_int, - - __unused1: *mut ::c_void, - __unused2: *mut ::c_void, - 
__unused3: *mut ::c_void, - __unused4: *mut ::c_void, - __unused5: *mut ::c_void, - } - - pub struct ucred { - pub pid: ::pid_t, - pub uid: ::uid_t, - pub gid: ::gid_t, - } - - pub struct flock { - pub l_type: ::c_short, - pub l_whence: ::c_short, - pub l_start: ::off_t, - pub l_len: ::off_t, - pub l_pid: ::pid_t, - } - - // FIXME this is actually a union - pub struct sem_t { - __size: [::c_char; 32], - __align: [::c_long; 0], - } - - pub struct __psw_t { - pub mask: u64, - pub addr: u64, - } - - // FIXME: This is actually a union. - pub struct fpreg_t { - pub d: ::c_double, - // f: ::c_float, - } - - pub struct fpregset_t { - pub fpc: u32, - __pad: u32, - pub fprs: [fpreg_t; 16], - } - - pub struct mcontext_t { - pub psw: __psw_t, - pub gregs: [u64; 16], - pub aregs: [u32; 16], - pub fpregs: fpregset_t, - } - - pub struct ucontext_t { - pub uc_flags: ::c_ulong, - pub uc_link: *mut ucontext_t, - pub uc_stack: ::stack_t, - pub uc_mcontext: mcontext_t, - pub uc_sigmask: ::sigset_t, - } - - pub struct msqid_ds { - pub msg_perm: ::ipc_perm, - pub msg_stime: ::time_t, - pub msg_rtime: ::time_t, - pub msg_ctime: ::time_t, - __msg_cbytes: ::c_ulong, - pub msg_qnum: ::msgqnum_t, - pub msg_qbytes: ::msglen_t, - pub msg_lspid: ::pid_t, - pub msg_lrpid: ::pid_t, - __glibc_reserved4: ::c_ulong, - __glibc_reserved5: ::c_ulong, - } -} - -pub const POSIX_FADV_DONTNEED: ::c_int = 6; -pub const POSIX_FADV_NOREUSE: ::c_int = 7; - -pub const __SIZEOF_PTHREAD_CONDATTR_T: usize = 4; -pub const __SIZEOF_PTHREAD_MUTEXATTR_T: usize = 4; -pub const __SIZEOF_PTHREAD_MUTEX_T: usize = 40; -pub const __SIZEOF_PTHREAD_RWLOCK_T: usize = 56; - -pub const EADDRINUSE: ::c_int = 98; -pub const EADDRNOTAVAIL: ::c_int = 99; -pub const ECONNABORTED: ::c_int = 103; -pub const ECONNREFUSED: ::c_int = 111; -pub const ECONNRESET: ::c_int = 104; -pub const EDEADLK: ::c_int = 35; -pub const ENOSYS: ::c_int = 38; -pub const ENOTCONN: ::c_int = 107; -pub const ETIMEDOUT: ::c_int = 110; -pub const FIOCLEX: 
::c_ulong = 0x5451; -pub const FIONBIO: ::c_ulong = 0x5421; -pub const MAP_ANON: ::c_int = 0x20; -pub const O_ACCMODE: ::c_int = 3; -pub const O_APPEND: ::c_int = 1024; -pub const O_CREAT: ::c_int = 64; -pub const O_EXCL: ::c_int = 128; -pub const O_NONBLOCK: ::c_int = 2048; -pub const PTHREAD_STACK_MIN: ::size_t = 16384; -pub const RLIM_INFINITY: ::rlim_t = 0xffffffffffffffff; -pub const SA_NOCLDWAIT: ::c_int = 2; -pub const SA_ONSTACK: ::c_int = 0x08000000; -pub const SA_SIGINFO: ::c_int = 4; -pub const SIGBUS: ::c_int = 7; -pub const SIGSTKSZ: ::size_t = 0x2000; -pub const SIG_SETMASK: ::c_int = 2; -pub const SOCK_DGRAM: ::c_int = 2; -pub const SOCK_STREAM: ::c_int = 1; -pub const SOL_SOCKET: ::c_int = 1; -pub const SO_BROADCAST: ::c_int = 6; -pub const SO_ERROR: ::c_int = 4; -pub const SO_RCVTIMEO: ::c_int = 20; -pub const SO_REUSEADDR: ::c_int = 2; -pub const SO_SNDTIMEO: ::c_int = 21; - -pub const RLIMIT_RSS: ::c_int = 5; -pub const RLIMIT_NOFILE: ::c_int = 7; -pub const RLIMIT_AS: ::c_int = 9; -pub const RLIMIT_NPROC: ::c_int = 6; -pub const RLIMIT_MEMLOCK: ::c_int = 8; -pub const RLIMIT_RTTIME: ::c_int = 15; -pub const RLIMIT_NLIMITS: ::c_int = 16; - -pub const O_NOCTTY: ::c_int = 256; -pub const O_SYNC: ::c_int = 1052672; -pub const O_RSYNC: ::c_int = 1052672; -pub const O_DSYNC: ::c_int = 4096; -pub const O_FSYNC: ::c_int = 0x101000; -pub const O_DIRECT: ::c_int = 0x4000; -pub const O_DIRECTORY: ::c_int = 0x10000; -pub const O_NOFOLLOW: ::c_int = 0x20000; - -pub const SOCK_NONBLOCK: ::c_int = O_NONBLOCK; - -pub const LC_PAPER: ::c_int = 7; -pub const LC_NAME: ::c_int = 8; -pub const LC_ADDRESS: ::c_int = 9; -pub const LC_TELEPHONE: ::c_int = 10; -pub const LC_MEASUREMENT: ::c_int = 11; -pub const LC_IDENTIFICATION: ::c_int = 12; -pub const LC_PAPER_MASK: ::c_int = (1 << LC_PAPER); -pub const LC_NAME_MASK: ::c_int = (1 << LC_NAME); -pub const LC_ADDRESS_MASK: ::c_int = (1 << LC_ADDRESS); -pub const LC_TELEPHONE_MASK: ::c_int = (1 << LC_TELEPHONE); -pub 
const LC_MEASUREMENT_MASK: ::c_int = (1 << LC_MEASUREMENT); -pub const LC_IDENTIFICATION_MASK: ::c_int = (1 << LC_IDENTIFICATION); -pub const LC_ALL_MASK: ::c_int = ::LC_CTYPE_MASK - | ::LC_NUMERIC_MASK - | ::LC_TIME_MASK - | ::LC_COLLATE_MASK - | ::LC_MONETARY_MASK - | ::LC_MESSAGES_MASK - | LC_PAPER_MASK - | LC_NAME_MASK - | LC_ADDRESS_MASK - | LC_TELEPHONE_MASK - | LC_MEASUREMENT_MASK - | LC_IDENTIFICATION_MASK; - -pub const MAP_ANONYMOUS: ::c_int = 0x0020; -pub const MAP_GROWSDOWN: ::c_int = 0x0100; -pub const MAP_DENYWRITE: ::c_int = 0x0800; -pub const MAP_EXECUTABLE: ::c_int = 0x01000; -pub const MAP_LOCKED: ::c_int = 0x02000; -pub const MAP_NORESERVE: ::c_int = 0x04000; -pub const MAP_POPULATE: ::c_int = 0x08000; -pub const MAP_NONBLOCK: ::c_int = 0x010000; -pub const MAP_STACK: ::c_int = 0x020000; - -pub const EDEADLOCK: ::c_int = 35; -pub const ENAMETOOLONG: ::c_int = 36; -pub const ENOLCK: ::c_int = 37; -pub const ENOTEMPTY: ::c_int = 39; -pub const ELOOP: ::c_int = 40; -pub const ENOMSG: ::c_int = 42; -pub const EIDRM: ::c_int = 43; -pub const ECHRNG: ::c_int = 44; -pub const EL2NSYNC: ::c_int = 45; -pub const EL3HLT: ::c_int = 46; -pub const EL3RST: ::c_int = 47; -pub const ELNRNG: ::c_int = 48; -pub const EUNATCH: ::c_int = 49; -pub const ENOCSI: ::c_int = 50; -pub const EL2HLT: ::c_int = 51; -pub const EBADE: ::c_int = 52; -pub const EBADR: ::c_int = 53; -pub const EXFULL: ::c_int = 54; -pub const ENOANO: ::c_int = 55; -pub const EBADRQC: ::c_int = 56; -pub const EBADSLT: ::c_int = 57; -pub const EMULTIHOP: ::c_int = 72; -pub const EOVERFLOW: ::c_int = 75; -pub const ENOTUNIQ: ::c_int = 76; -pub const EBADFD: ::c_int = 77; -pub const EBADMSG: ::c_int = 74; -pub const EREMCHG: ::c_int = 78; -pub const ELIBACC: ::c_int = 79; -pub const ELIBBAD: ::c_int = 80; -pub const ELIBSCN: ::c_int = 81; -pub const ELIBMAX: ::c_int = 82; -pub const ELIBEXEC: ::c_int = 83; -pub const EILSEQ: ::c_int = 84; -pub const ERESTART: ::c_int = 85; -pub const ESTRPIPE: 
::c_int = 86; -pub const EUSERS: ::c_int = 87; -pub const ENOTSOCK: ::c_int = 88; -pub const EDESTADDRREQ: ::c_int = 89; -pub const EMSGSIZE: ::c_int = 90; -pub const EPROTOTYPE: ::c_int = 91; -pub const ENOPROTOOPT: ::c_int = 92; -pub const EPROTONOSUPPORT: ::c_int = 93; -pub const ESOCKTNOSUPPORT: ::c_int = 94; -pub const EOPNOTSUPP: ::c_int = 95; -pub const ENOTSUP: ::c_int = EOPNOTSUPP; -pub const EPFNOSUPPORT: ::c_int = 96; -pub const EAFNOSUPPORT: ::c_int = 97; -pub const ENETDOWN: ::c_int = 100; -pub const ENETUNREACH: ::c_int = 101; -pub const ENETRESET: ::c_int = 102; -pub const ENOBUFS: ::c_int = 105; -pub const EISCONN: ::c_int = 106; -pub const ESHUTDOWN: ::c_int = 108; -pub const ETOOMANYREFS: ::c_int = 109; -pub const EHOSTDOWN: ::c_int = 112; -pub const EHOSTUNREACH: ::c_int = 113; -pub const EALREADY: ::c_int = 114; -pub const EINPROGRESS: ::c_int = 115; -pub const ESTALE: ::c_int = 116; -pub const EUCLEAN: ::c_int = 117; -pub const ENOTNAM: ::c_int = 118; -pub const ENAVAIL: ::c_int = 119; -pub const EISNAM: ::c_int = 120; -pub const EREMOTEIO: ::c_int = 121; -pub const EDQUOT: ::c_int = 122; -pub const ENOMEDIUM: ::c_int = 123; -pub const EMEDIUMTYPE: ::c_int = 124; -pub const ECANCELED: ::c_int = 125; -pub const ENOKEY: ::c_int = 126; -pub const EKEYEXPIRED: ::c_int = 127; -pub const EKEYREVOKED: ::c_int = 128; -pub const EKEYREJECTED: ::c_int = 129; -pub const EOWNERDEAD: ::c_int = 130; -pub const ENOTRECOVERABLE: ::c_int = 131; -pub const EHWPOISON: ::c_int = 133; -pub const ERFKILL: ::c_int = 132; - -pub const SOCK_SEQPACKET: ::c_int = 5; - -pub const SO_TYPE: ::c_int = 3; -pub const SO_DONTROUTE: ::c_int = 5; -pub const SO_SNDBUF: ::c_int = 7; -pub const SO_RCVBUF: ::c_int = 8; -pub const SO_KEEPALIVE: ::c_int = 9; -pub const SO_OOBINLINE: ::c_int = 10; -pub const SO_LINGER: ::c_int = 13; -pub const SO_REUSEPORT: ::c_int = 15; -pub const SO_PEERCRED: ::c_int = 17; -pub const SO_RCVLOWAT: ::c_int = 18; -pub const SO_SNDLOWAT: ::c_int = 19; 
-pub const SO_ACCEPTCONN: ::c_int = 30; - -pub const TCP_COOKIE_TRANSACTIONS: ::c_int = 15; -pub const TCP_THIN_LINEAR_TIMEOUTS: ::c_int = 16; -pub const TCP_THIN_DUPACK: ::c_int = 17; -pub const TCP_USER_TIMEOUT: ::c_int = 18; -pub const TCP_REPAIR: ::c_int = 19; -pub const TCP_REPAIR_QUEUE: ::c_int = 20; -pub const TCP_QUEUE_SEQ: ::c_int = 21; -pub const TCP_REPAIR_OPTIONS: ::c_int = 22; -pub const TCP_FASTOPEN: ::c_int = 23; -pub const TCP_TIMESTAMP: ::c_int = 24; - -pub const SIGCHLD: ::c_int = 17; -pub const SIGUSR1: ::c_int = 10; -pub const SIGUSR2: ::c_int = 12; -pub const SIGCONT: ::c_int = 18; -pub const SIGSTOP: ::c_int = 19; -pub const SIGTSTP: ::c_int = 20; -pub const SIGURG: ::c_int = 23; -pub const SIGIO: ::c_int = 29; -pub const SIGSYS: ::c_int = 31; -pub const SIGSTKFLT: ::c_int = 16; -pub const SIGUNUSED: ::c_int = 31; -pub const SIGTTIN: ::c_int = 21; -pub const SIGTTOU: ::c_int = 22; -pub const SIGXCPU: ::c_int = 24; -pub const SIGXFSZ: ::c_int = 25; -pub const SIGVTALRM: ::c_int = 26; -pub const SIGPROF: ::c_int = 27; -pub const SIGWINCH: ::c_int = 28; -pub const SIGPOLL: ::c_int = 29; -pub const SIGPWR: ::c_int = 30; -pub const SIG_BLOCK: ::c_int = 0x000000; -pub const SIG_UNBLOCK: ::c_int = 0x01; - -pub const FALLOC_FL_KEEP_SIZE: ::c_int = 0x01; -pub const FALLOC_FL_PUNCH_HOLE: ::c_int = 0x02; - -pub const BUFSIZ: ::c_uint = 8192; -pub const TMP_MAX: ::c_uint = 238328; -pub const FOPEN_MAX: ::c_uint = 16; -pub const POSIX_MADV_DONTNEED: ::c_int = 4; -pub const _SC_2_C_VERSION: ::c_int = 96; -pub const O_ASYNC: ::c_int = 0x2000; -pub const O_NDELAY: ::c_int = 0x800; -pub const ST_RELATIME: ::c_ulong = 4096; -pub const NI_MAXHOST: ::socklen_t = 1025; - -pub const ADFS_SUPER_MAGIC: ::c_int = 0x0000adf5; -pub const AFFS_SUPER_MAGIC: ::c_int = 0x0000adff; -pub const CODA_SUPER_MAGIC: ::c_int = 0x73757245; -pub const CRAMFS_MAGIC: ::c_int = 0x28cd3d45; -pub const EFS_SUPER_MAGIC: ::c_int = 0x00414a53; -pub const EXT2_SUPER_MAGIC: ::c_int = 
0x0000ef53; -pub const EXT3_SUPER_MAGIC: ::c_int = 0x0000ef53; -pub const EXT4_SUPER_MAGIC: ::c_int = 0x0000ef53; -pub const HPFS_SUPER_MAGIC: ::c_int = 0xf995e849; -pub const HUGETLBFS_MAGIC: ::c_int = 0x958458f6; -pub const ISOFS_SUPER_MAGIC: ::c_int = 0x00009660; -pub const JFFS2_SUPER_MAGIC: ::c_int = 0x000072b6; -pub const MINIX_SUPER_MAGIC: ::c_int = 0x0000137f; -pub const MINIX_SUPER_MAGIC2: ::c_int = 0x0000138f; -pub const MINIX2_SUPER_MAGIC: ::c_int = 0x00002468; -pub const MINIX2_SUPER_MAGIC2: ::c_int = 0x00002478; -pub const MSDOS_SUPER_MAGIC: ::c_int = 0x00004d44; -pub const NCP_SUPER_MAGIC: ::c_int = 0x0000564c; -pub const NFS_SUPER_MAGIC: ::c_int = 0x00006969; -pub const OPENPROM_SUPER_MAGIC: ::c_int = 0x00009fa1; -pub const PROC_SUPER_MAGIC: ::c_int = 0x00009fa0; -pub const QNX4_SUPER_MAGIC: ::c_int = 0x0000002f; -pub const REISERFS_SUPER_MAGIC: ::c_int = 0x52654973; -pub const SMB_SUPER_MAGIC: ::c_int = 0x0000517b; -pub const TMPFS_MAGIC: ::c_int = 0x01021994; -pub const USBDEVICE_SUPER_MAGIC: ::c_int = 0x00009fa2; - -pub const VEOF: usize = 4; -pub const VEOL: usize = 11; -pub const VEOL2: usize = 16; -pub const VMIN: usize = 6; -pub const IEXTEN: ::tcflag_t = 0x00008000; -pub const TOSTOP: ::tcflag_t = 0x00000100; -pub const FLUSHO: ::tcflag_t = 0x00001000; -pub const IUTF8: ::tcflag_t = 0x00004000; - -pub const CPU_SETSIZE: ::c_int = 0x400; - -pub const EXTPROC: ::tcflag_t = 0x00010000; - -pub const QFMT_VFS_V1: ::c_int = 4; - -pub const PTRACE_TRACEME: ::c_uint = 0; -pub const PTRACE_PEEKTEXT: ::c_uint = 1; -pub const PTRACE_PEEKDATA: ::c_uint = 2; -pub const PTRACE_PEEKUSER: ::c_uint = 3; -pub const PTRACE_POKETEXT: ::c_uint = 4; -pub const PTRACE_POKEDATA: ::c_uint = 5; -pub const PTRACE_POKEUSER: ::c_uint = 6; -pub const PTRACE_CONT: ::c_uint = 7; -pub const PTRACE_KILL: ::c_uint = 8; -pub const PTRACE_SINGLESTEP: ::c_uint = 9; -pub const PTRACE_GETREGS: ::c_uint = 12; -pub const PTRACE_SETREGS: ::c_uint = 13; -pub const PTRACE_GETFPREGS: 
::c_uint = 14; -pub const PTRACE_SETFPREGS: ::c_uint = 15; -pub const PTRACE_ATTACH: ::c_uint = 16; -pub const PTRACE_DETACH: ::c_uint = 17; -pub const PTRACE_SYSCALL: ::c_uint = 24; -pub const PTRACE_SETOPTIONS: ::c_uint = 0x4200; -pub const PTRACE_GETEVENTMSG: ::c_uint = 0x4201; -pub const PTRACE_GETSIGINFO: ::c_uint = 0x4202; -pub const PTRACE_SETSIGINFO: ::c_uint = 0x4203; -pub const PTRACE_GETREGSET: ::c_uint = 0x4204; -pub const PTRACE_SETREGSET: ::c_uint = 0x4205; -pub const PTRACE_SEIZE: ::c_uint = 0x4206; -pub const PTRACE_INTERRUPT: ::c_uint = 0x4207; -pub const PTRACE_LISTEN: ::c_uint = 0x4208; -pub const PTRACE_PEEKSIGINFO: ::c_uint = 0x4209; - -pub const MADV_DODUMP: ::c_int = 17; -pub const MADV_DONTDUMP: ::c_int = 16; - -pub const EPOLLWAKEUP: ::c_int = 0x20000000; - -pub const MADV_HUGEPAGE: ::c_int = 14; -pub const MADV_NOHUGEPAGE: ::c_int = 15; -pub const MAP_HUGETLB: ::c_int = 0x040000; - -pub const EFD_NONBLOCK: ::c_int = 0x800; - -pub const F_GETLK: ::c_int = 5; -pub const F_GETOWN: ::c_int = 9; -pub const F_SETOWN: ::c_int = 8; -pub const F_SETLK: ::c_int = 6; -pub const F_SETLKW: ::c_int = 7; - -pub const SEEK_DATA: ::c_int = 3; -pub const SEEK_HOLE: ::c_int = 4; - -pub const SFD_NONBLOCK: ::c_int = 0x0800; - -pub const TCSANOW: ::c_int = 0; -pub const TCSADRAIN: ::c_int = 1; -pub const TCSAFLUSH: ::c_int = 2; - -pub const TCGETS: ::c_ulong = 0x5401; -pub const TCSETS: ::c_ulong = 0x5402; -pub const TCSETSW: ::c_ulong = 0x5403; -pub const TCSETSF: ::c_ulong = 0x5404; -pub const TCGETA: ::c_ulong = 0x5405; -pub const TCSETA: ::c_ulong = 0x5406; -pub const TCSETAW: ::c_ulong = 0x5407; -pub const TCSETAF: ::c_ulong = 0x5408; -pub const TCSBRK: ::c_ulong = 0x5409; -pub const TCXONC: ::c_ulong = 0x540A; -pub const TCFLSH: ::c_ulong = 0x540B; -pub const TIOCGSOFTCAR: ::c_ulong = 0x5419; -pub const TIOCSSOFTCAR: ::c_ulong = 0x541A; -pub const TIOCINQ: ::c_ulong = 0x541B; -pub const TIOCLINUX: ::c_ulong = 0x541C; -pub const TIOCGSERIAL: ::c_ulong = 
0x541E; -pub const TIOCEXCL: ::c_ulong = 0x540C; -pub const TIOCNXCL: ::c_ulong = 0x540D; -pub const TIOCSCTTY: ::c_ulong = 0x540E; -pub const TIOCGPGRP: ::c_ulong = 0x540F; -pub const TIOCSPGRP: ::c_ulong = 0x5410; -pub const TIOCOUTQ: ::c_ulong = 0x5411; -pub const TIOCSTI: ::c_ulong = 0x5412; -pub const TIOCGWINSZ: ::c_ulong = 0x5413; -pub const TIOCSWINSZ: ::c_ulong = 0x5414; -pub const TIOCMGET: ::c_ulong = 0x5415; -pub const TIOCMBIS: ::c_ulong = 0x5416; -pub const TIOCMBIC: ::c_ulong = 0x5417; -pub const TIOCMSET: ::c_ulong = 0x5418; -pub const FIONREAD: ::c_ulong = 0x541B; -pub const TIOCCONS: ::c_ulong = 0x541D; - -pub const RTLD_DEEPBIND: ::c_int = 0x8; -pub const RTLD_GLOBAL: ::c_int = 0x100; -pub const RTLD_NOLOAD: ::c_int = 0x4; - -pub const LINUX_REBOOT_MAGIC1: ::c_int = 0xfee1dead; -pub const LINUX_REBOOT_MAGIC2: ::c_int = 672274793; -pub const LINUX_REBOOT_MAGIC2A: ::c_int = 85072278; -pub const LINUX_REBOOT_MAGIC2B: ::c_int = 369367448; -pub const LINUX_REBOOT_MAGIC2C: ::c_int = 537993216; - -pub const LINUX_REBOOT_CMD_RESTART: ::c_int = 0x01234567; -pub const LINUX_REBOOT_CMD_HALT: ::c_int = 0xCDEF0123; -pub const LINUX_REBOOT_CMD_CAD_ON: ::c_int = 0x89ABCDEF; -pub const LINUX_REBOOT_CMD_CAD_OFF: ::c_int = 0x00000000; -pub const LINUX_REBOOT_CMD_POWER_OFF: ::c_int = 0x4321FEDC; -pub const LINUX_REBOOT_CMD_RESTART2: ::c_int = 0xA1B2C3D4; -pub const LINUX_REBOOT_CMD_SW_SUSPEND: ::c_int = 0xD000FCE2; -pub const LINUX_REBOOT_CMD_KEXEC: ::c_int = 0x45584543; - -pub const SYS_gettid: ::c_long = 236; -pub const SYS_perf_event_open: ::c_long = 331; - -#[link(name = "util")] -extern { - pub fn sysctl(name: *mut ::c_int, - namelen: ::c_int, - oldp: *mut ::c_void, - oldlenp: *mut ::size_t, - newp: *mut ::c_void, - newlen: ::size_t) - -> ::c_int; - pub fn ioctl(fd: ::c_int, request: ::c_ulong, ...) 
-> ::c_int; - pub fn backtrace(buf: *mut *mut ::c_void, - sz: ::c_int) -> ::c_int; - pub fn glob64(pattern: *const ::c_char, - flags: ::c_int, - errfunc: ::dox::Option ::c_int>, - pglob: *mut glob64_t) -> ::c_int; - pub fn globfree64(pglob: *mut glob64_t); - pub fn ptrace(request: ::c_uint, ...) -> ::c_long; - pub fn pthread_attr_getaffinity_np(attr: *const ::pthread_attr_t, - cpusetsize: ::size_t, - cpuset: *mut ::cpu_set_t) -> ::c_int; - pub fn pthread_attr_setaffinity_np(attr: *mut ::pthread_attr_t, - cpusetsize: ::size_t, - cpuset: *const ::cpu_set_t) -> ::c_int; - pub fn getpriority(which: ::__priority_which_t, who: ::id_t) -> ::c_int; - pub fn setpriority(which: ::__priority_which_t, who: ::id_t, - prio: ::c_int) -> ::c_int; - pub fn pthread_getaffinity_np(thread: ::pthread_t, - cpusetsize: ::size_t, - cpuset: *mut ::cpu_set_t) -> ::c_int; - pub fn pthread_setaffinity_np(thread: ::pthread_t, - cpusetsize: ::size_t, - cpuset: *const ::cpu_set_t) -> ::c_int; - pub fn sched_getcpu() -> ::c_int; - pub fn getcontext(ucp: *mut ucontext_t) -> ::c_int; - pub fn setcontext(ucp: *const ucontext_t) -> ::c_int; - pub fn makecontext(ucp: *mut ucontext_t, - func: extern fn (), - argc: ::c_int, ...); - pub fn swapcontext(uocp: *mut ucontext_t, - ucp: *const ucontext_t) -> ::c_int; -} diff -Nru cargo-0.17.0/vendor/libc-0.2.18/src/unix/notbsd/mod.rs cargo-0.19.0/vendor/libc-0.2.18/src/unix/notbsd/mod.rs --- cargo-0.17.0/vendor/libc-0.2.18/src/unix/notbsd/mod.rs 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/libc-0.2.18/src/unix/notbsd/mod.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,890 +0,0 @@ -use dox::mem; - -pub type sa_family_t = u16; -pub type pthread_key_t = ::c_uint; -pub type speed_t = ::c_uint; -pub type tcflag_t = ::c_uint; -pub type loff_t = ::c_longlong; -pub type clockid_t = ::c_int; -pub type key_t = ::c_int; -pub type id_t = ::c_uint; - -pub enum timezone {} - -s! 
{ - pub struct sockaddr { - pub sa_family: sa_family_t, - pub sa_data: [::c_char; 14], - } - - pub struct sockaddr_in { - pub sin_family: sa_family_t, - pub sin_port: ::in_port_t, - pub sin_addr: ::in_addr, - pub sin_zero: [u8; 8], - } - - pub struct sockaddr_in6 { - pub sin6_family: sa_family_t, - pub sin6_port: ::in_port_t, - pub sin6_flowinfo: u32, - pub sin6_addr: ::in6_addr, - pub sin6_scope_id: u32, - } - - pub struct sockaddr_un { - pub sun_family: sa_family_t, - pub sun_path: [::c_char; 108] - } - - pub struct sockaddr_storage { - pub ss_family: sa_family_t, - __ss_align: ::size_t, - #[cfg(target_pointer_width = "32")] - __ss_pad2: [u8; 128 - 2 * 4], - #[cfg(target_pointer_width = "64")] - __ss_pad2: [u8; 128 - 2 * 8], - } - - pub struct addrinfo { - pub ai_flags: ::c_int, - pub ai_family: ::c_int, - pub ai_socktype: ::c_int, - pub ai_protocol: ::c_int, - pub ai_addrlen: socklen_t, - - #[cfg(any(target_os = "linux", - target_os = "emscripten", - target_os = "fuchsia"))] - pub ai_addr: *mut ::sockaddr, - - pub ai_canonname: *mut c_char, - - #[cfg(target_os = "android")] - pub ai_addr: *mut ::sockaddr, - - pub ai_next: *mut addrinfo, - } - - pub struct sockaddr_nl { - pub nl_family: ::sa_family_t, - nl_pad: ::c_ushort, - pub nl_pid: u32, - pub nl_groups: u32 - } - - pub struct sockaddr_ll { - pub sll_family: ::c_ushort, - pub sll_protocol: ::c_ushort, - pub sll_ifindex: ::c_int, - pub sll_hatype: ::c_ushort, - pub sll_pkttype: ::c_uchar, - pub sll_halen: ::c_uchar, - pub sll_addr: [::c_uchar; 8] - } - - pub struct fd_set { - fds_bits: [::c_ulong; FD_SETSIZE / ULONG_SIZE], - } - - pub struct tm { - pub tm_sec: ::c_int, - pub tm_min: ::c_int, - pub tm_hour: ::c_int, - pub tm_mday: ::c_int, - pub tm_mon: ::c_int, - pub tm_year: ::c_int, - pub tm_wday: ::c_int, - pub tm_yday: ::c_int, - pub tm_isdst: ::c_int, - pub tm_gmtoff: ::c_long, - pub tm_zone: *const ::c_char, - } - - pub struct sched_param { - pub sched_priority: ::c_int, - #[cfg(any(target_env = 
"musl"))] - pub sched_ss_low_priority: ::c_int, - #[cfg(any(target_env = "musl"))] - pub sched_ss_repl_period: ::timespec, - #[cfg(any(target_env = "musl"))] - pub sched_ss_init_budget: ::timespec, - #[cfg(any(target_env = "musl"))] - pub sched_ss_max_repl: ::c_int, - } - - pub struct Dl_info { - pub dli_fname: *const ::c_char, - pub dli_fbase: *mut ::c_void, - pub dli_sname: *const ::c_char, - pub dli_saddr: *mut ::c_void, - } - - #[cfg_attr(any(all(target_arch = "x86", not(target_env = "musl")), - target_arch = "x86_64"), - repr(packed))] - pub struct epoll_event { - pub events: ::uint32_t, - pub u64: ::uint64_t, - } - - pub struct utsname { - pub sysname: [::c_char; 65], - pub nodename: [::c_char; 65], - pub release: [::c_char; 65], - pub version: [::c_char; 65], - pub machine: [::c_char; 65], - pub domainname: [::c_char; 65] - } - - pub struct lconv { - pub decimal_point: *mut ::c_char, - pub thousands_sep: *mut ::c_char, - pub grouping: *mut ::c_char, - pub int_curr_symbol: *mut ::c_char, - pub currency_symbol: *mut ::c_char, - pub mon_decimal_point: *mut ::c_char, - pub mon_thousands_sep: *mut ::c_char, - pub mon_grouping: *mut ::c_char, - pub positive_sign: *mut ::c_char, - pub negative_sign: *mut ::c_char, - pub int_frac_digits: ::c_char, - pub frac_digits: ::c_char, - pub p_cs_precedes: ::c_char, - pub p_sep_by_space: ::c_char, - pub n_cs_precedes: ::c_char, - pub n_sep_by_space: ::c_char, - pub p_sign_posn: ::c_char, - pub n_sign_posn: ::c_char, - pub int_p_cs_precedes: ::c_char, - pub int_p_sep_by_space: ::c_char, - pub int_n_cs_precedes: ::c_char, - pub int_n_sep_by_space: ::c_char, - pub int_p_sign_posn: ::c_char, - pub int_n_sign_posn: ::c_char, - } - - pub struct sigevent { - pub sigev_value: ::sigval, - pub sigev_signo: ::c_int, - pub sigev_notify: ::c_int, - // Actually a union. 
We only expose sigev_notify_thread_id because it's - // the most useful member - pub sigev_notify_thread_id: ::c_int, - #[cfg(target_pointer_width = "64")] - __unused1: [::c_int; 11], - #[cfg(target_pointer_width = "32")] - __unused1: [::c_int; 12] - } -} - -// intentionally not public, only used for fd_set -cfg_if! { - if #[cfg(target_pointer_width = "32")] { - const ULONG_SIZE: usize = 32; - } else if #[cfg(target_pointer_width = "64")] { - const ULONG_SIZE: usize = 64; - } else { - // Unknown target_pointer_width - } -} - -pub const EXIT_FAILURE: ::c_int = 1; -pub const EXIT_SUCCESS: ::c_int = 0; -pub const RAND_MAX: ::c_int = 2147483647; -pub const EOF: ::c_int = -1; -pub const SEEK_SET: ::c_int = 0; -pub const SEEK_CUR: ::c_int = 1; -pub const SEEK_END: ::c_int = 2; -pub const _IOFBF: ::c_int = 0; -pub const _IONBF: ::c_int = 2; -pub const _IOLBF: ::c_int = 1; - -pub const F_DUPFD: ::c_int = 0; -pub const F_GETFD: ::c_int = 1; -pub const F_SETFD: ::c_int = 2; -pub const F_GETFL: ::c_int = 3; -pub const F_SETFL: ::c_int = 4; - -// Linux-specific fcntls -pub const F_SETLEASE: ::c_int = 1024; -pub const F_GETLEASE: ::c_int = 1025; -pub const F_NOTIFY: ::c_int = 1026; -pub const F_DUPFD_CLOEXEC: ::c_int = 1030; -pub const F_SETPIPE_SZ: ::c_int = 1031; -pub const F_GETPIPE_SZ: ::c_int = 1032; - -// TODO(#235): Include file sealing fcntls once we have a way to verify them. 
- -pub const SIGTRAP: ::c_int = 5; - -pub const PTHREAD_CREATE_JOINABLE: ::c_int = 0; -pub const PTHREAD_CREATE_DETACHED: ::c_int = 1; - -pub const CLOCK_REALTIME: clockid_t = 0; -pub const CLOCK_MONOTONIC: clockid_t = 1; -pub const CLOCK_PROCESS_CPUTIME_ID: clockid_t = 2; -pub const CLOCK_THREAD_CPUTIME_ID: clockid_t = 3; -pub const CLOCK_MONOTONIC_RAW: clockid_t = 4; -pub const CLOCK_REALTIME_COARSE: clockid_t = 5; -pub const CLOCK_MONOTONIC_COARSE: clockid_t = 6; -pub const CLOCK_BOOTTIME: clockid_t = 7; -pub const CLOCK_REALTIME_ALARM: clockid_t = 8; -pub const CLOCK_BOOTTIME_ALARM: clockid_t = 9; -// TODO(#247) Someday our Travis shall have glibc 2.21 (released in Sep -// 2014.) See also musl/mod.rs -// pub const CLOCK_SGI_CYCLE: clockid_t = 10; -// pub const CLOCK_TAI: clockid_t = 11; -pub const TIMER_ABSTIME: ::c_int = 1; - -pub const RLIMIT_CPU: ::c_int = 0; -pub const RLIMIT_FSIZE: ::c_int = 1; -pub const RLIMIT_DATA: ::c_int = 2; -pub const RLIMIT_STACK: ::c_int = 3; -pub const RLIMIT_CORE: ::c_int = 4; -pub const RLIMIT_LOCKS: ::c_int = 10; -pub const RLIMIT_SIGPENDING: ::c_int = 11; -pub const RLIMIT_MSGQUEUE: ::c_int = 12; -pub const RLIMIT_NICE: ::c_int = 13; -pub const RLIMIT_RTPRIO: ::c_int = 14; - -pub const RUSAGE_SELF: ::c_int = 0; - -pub const O_RDONLY: ::c_int = 0; -pub const O_WRONLY: ::c_int = 1; -pub const O_RDWR: ::c_int = 2; -pub const O_TRUNC: ::c_int = 512; -pub const O_CLOEXEC: ::c_int = 0x80000; - -pub const SOCK_CLOEXEC: ::c_int = O_CLOEXEC; - -pub const S_IFIFO: ::mode_t = 4096; -pub const S_IFCHR: ::mode_t = 8192; -pub const S_IFBLK: ::mode_t = 24576; -pub const S_IFDIR: ::mode_t = 16384; -pub const S_IFREG: ::mode_t = 32768; -pub const S_IFLNK: ::mode_t = 40960; -pub const S_IFSOCK: ::mode_t = 49152; -pub const S_IFMT: ::mode_t = 61440; -pub const S_IRWXU: ::mode_t = 448; -pub const S_IXUSR: ::mode_t = 64; -pub const S_IWUSR: ::mode_t = 128; -pub const S_IRUSR: ::mode_t = 256; -pub const S_IRWXG: ::mode_t = 56; -pub const S_IXGRP: 
::mode_t = 8; -pub const S_IWGRP: ::mode_t = 16; -pub const S_IRGRP: ::mode_t = 32; -pub const S_IRWXO: ::mode_t = 7; -pub const S_IXOTH: ::mode_t = 1; -pub const S_IWOTH: ::mode_t = 2; -pub const S_IROTH: ::mode_t = 4; -pub const F_OK: ::c_int = 0; -pub const R_OK: ::c_int = 4; -pub const W_OK: ::c_int = 2; -pub const X_OK: ::c_int = 1; -pub const STDIN_FILENO: ::c_int = 0; -pub const STDOUT_FILENO: ::c_int = 1; -pub const STDERR_FILENO: ::c_int = 2; -pub const SIGHUP: ::c_int = 1; -pub const SIGINT: ::c_int = 2; -pub const SIGQUIT: ::c_int = 3; -pub const SIGILL: ::c_int = 4; -pub const SIGABRT: ::c_int = 6; -pub const SIGFPE: ::c_int = 8; -pub const SIGKILL: ::c_int = 9; -pub const SIGSEGV: ::c_int = 11; -pub const SIGPIPE: ::c_int = 13; -pub const SIGALRM: ::c_int = 14; -pub const SIGTERM: ::c_int = 15; - -pub const PROT_NONE: ::c_int = 0; -pub const PROT_READ: ::c_int = 1; -pub const PROT_WRITE: ::c_int = 2; -pub const PROT_EXEC: ::c_int = 4; - -pub const LC_CTYPE: ::c_int = 0; -pub const LC_NUMERIC: ::c_int = 1; -pub const LC_TIME: ::c_int = 2; -pub const LC_COLLATE: ::c_int = 3; -pub const LC_MONETARY: ::c_int = 4; -pub const LC_MESSAGES: ::c_int = 5; -pub const LC_ALL: ::c_int = 6; -pub const LC_CTYPE_MASK: ::c_int = (1 << LC_CTYPE); -pub const LC_NUMERIC_MASK: ::c_int = (1 << LC_NUMERIC); -pub const LC_TIME_MASK: ::c_int = (1 << LC_TIME); -pub const LC_COLLATE_MASK: ::c_int = (1 << LC_COLLATE); -pub const LC_MONETARY_MASK: ::c_int = (1 << LC_MONETARY); -pub const LC_MESSAGES_MASK: ::c_int = (1 << LC_MESSAGES); -// LC_ALL_MASK defined per platform - -pub const MAP_FILE: ::c_int = 0x0000; -pub const MAP_SHARED: ::c_int = 0x0001; -pub const MAP_PRIVATE: ::c_int = 0x0002; -pub const MAP_FIXED: ::c_int = 0x0010; - -pub const MAP_FAILED: *mut ::c_void = !0 as *mut ::c_void; - -// MS_ flags for msync(2) -pub const MS_ASYNC: ::c_int = 0x0001; -pub const MS_INVALIDATE: ::c_int = 0x0002; -pub const MS_SYNC: ::c_int = 0x0004; - -// MS_ flags for mount(2) -pub const 
MS_RDONLY: ::c_ulong = 0x01; -pub const MS_NOSUID: ::c_ulong = 0x02; -pub const MS_NODEV: ::c_ulong = 0x04; -pub const MS_NOEXEC: ::c_ulong = 0x08; -pub const MS_SYNCHRONOUS: ::c_ulong = 0x10; -pub const MS_REMOUNT: ::c_ulong = 0x20; -pub const MS_MANDLOCK: ::c_ulong = 0x40; -pub const MS_DIRSYNC: ::c_ulong = 0x80; -pub const MS_NOATIME: ::c_ulong = 0x0400; -pub const MS_NODIRATIME: ::c_ulong = 0x0800; -pub const MS_BIND: ::c_ulong = 0x1000; -pub const MS_MOVE: ::c_ulong = 0x2000; -pub const MS_REC: ::c_ulong = 0x4000; -pub const MS_SILENT: ::c_ulong = 0x8000; -pub const MS_POSIXACL: ::c_ulong = 0x010000; -pub const MS_UNBINDABLE: ::c_ulong = 0x020000; -pub const MS_PRIVATE: ::c_ulong = 0x040000; -pub const MS_SLAVE: ::c_ulong = 0x080000; -pub const MS_SHARED: ::c_ulong = 0x100000; -pub const MS_RELATIME: ::c_ulong = 0x200000; -pub const MS_KERNMOUNT: ::c_ulong = 0x400000; -pub const MS_I_VERSION: ::c_ulong = 0x800000; -pub const MS_STRICTATIME: ::c_ulong = 0x1000000; -pub const MS_ACTIVE: ::c_ulong = 0x40000000; -pub const MS_NOUSER: ::c_ulong = 0x80000000; -pub const MS_MGC_VAL: ::c_ulong = 0xc0ed0000; -pub const MS_MGC_MSK: ::c_ulong = 0xffff0000; -pub const MS_RMT_MASK: ::c_ulong = 0x800051; - -pub const EPERM: ::c_int = 1; -pub const ENOENT: ::c_int = 2; -pub const ESRCH: ::c_int = 3; -pub const EINTR: ::c_int = 4; -pub const EIO: ::c_int = 5; -pub const ENXIO: ::c_int = 6; -pub const E2BIG: ::c_int = 7; -pub const ENOEXEC: ::c_int = 8; -pub const EBADF: ::c_int = 9; -pub const ECHILD: ::c_int = 10; -pub const EAGAIN: ::c_int = 11; -pub const ENOMEM: ::c_int = 12; -pub const EACCES: ::c_int = 13; -pub const EFAULT: ::c_int = 14; -pub const ENOTBLK: ::c_int = 15; -pub const EBUSY: ::c_int = 16; -pub const EEXIST: ::c_int = 17; -pub const EXDEV: ::c_int = 18; -pub const ENODEV: ::c_int = 19; -pub const ENOTDIR: ::c_int = 20; -pub const EISDIR: ::c_int = 21; -pub const EINVAL: ::c_int = 22; -pub const ENFILE: ::c_int = 23; -pub const EMFILE: ::c_int = 24; -pub 
const ENOTTY: ::c_int = 25; -pub const ETXTBSY: ::c_int = 26; -pub const EFBIG: ::c_int = 27; -pub const ENOSPC: ::c_int = 28; -pub const ESPIPE: ::c_int = 29; -pub const EROFS: ::c_int = 30; -pub const EMLINK: ::c_int = 31; -pub const EPIPE: ::c_int = 32; -pub const EDOM: ::c_int = 33; -pub const ERANGE: ::c_int = 34; -pub const EWOULDBLOCK: ::c_int = EAGAIN; - -pub const EBFONT: ::c_int = 59; -pub const ENOSTR: ::c_int = 60; -pub const ENODATA: ::c_int = 61; -pub const ETIME: ::c_int = 62; -pub const ENOSR: ::c_int = 63; -pub const ENONET: ::c_int = 64; -pub const ENOPKG: ::c_int = 65; -pub const EREMOTE: ::c_int = 66; -pub const ENOLINK: ::c_int = 67; -pub const EADV: ::c_int = 68; -pub const ESRMNT: ::c_int = 69; -pub const ECOMM: ::c_int = 70; -pub const EPROTO: ::c_int = 71; -pub const EDOTDOT: ::c_int = 73; - -pub const AF_PACKET: ::c_int = 17; -pub const IPPROTO_RAW: ::c_int = 255; - -pub const PROT_GROWSDOWN: ::c_int = 0x1000000; -pub const PROT_GROWSUP: ::c_int = 0x2000000; - -pub const MAP_TYPE: ::c_int = 0x000f; - -pub const MADV_NORMAL: ::c_int = 0; -pub const MADV_RANDOM: ::c_int = 1; -pub const MADV_SEQUENTIAL: ::c_int = 2; -pub const MADV_WILLNEED: ::c_int = 3; -pub const MADV_DONTNEED: ::c_int = 4; -pub const MADV_REMOVE: ::c_int = 9; -pub const MADV_DONTFORK: ::c_int = 10; -pub const MADV_DOFORK: ::c_int = 11; -pub const MADV_MERGEABLE: ::c_int = 12; -pub const MADV_UNMERGEABLE: ::c_int = 13; -pub const MADV_HWPOISON: ::c_int = 100; - -pub const IFF_UP: ::c_int = 0x1; -pub const IFF_BROADCAST: ::c_int = 0x2; -pub const IFF_DEBUG: ::c_int = 0x4; -pub const IFF_LOOPBACK: ::c_int = 0x8; -pub const IFF_POINTOPOINT: ::c_int = 0x10; -pub const IFF_NOTRAILERS: ::c_int = 0x20; -pub const IFF_RUNNING: ::c_int = 0x40; -pub const IFF_NOARP: ::c_int = 0x80; -pub const IFF_PROMISC: ::c_int = 0x100; -pub const IFF_ALLMULTI: ::c_int = 0x200; -pub const IFF_MASTER: ::c_int = 0x400; -pub const IFF_SLAVE: ::c_int = 0x800; -pub const IFF_MULTICAST: ::c_int = 0x1000; 
-pub const IFF_PORTSEL: ::c_int = 0x2000; -pub const IFF_AUTOMEDIA: ::c_int = 0x4000; -pub const IFF_DYNAMIC: ::c_int = 0x8000; - -pub const AF_UNIX: ::c_int = 1; -pub const AF_INET: ::c_int = 2; -pub const AF_INET6: ::c_int = 10; -pub const AF_NETLINK: ::c_int = 16; -pub const SOCK_RAW: ::c_int = 3; -pub const IPPROTO_TCP: ::c_int = 6; -pub const IPPROTO_IP: ::c_int = 0; -pub const IPPROTO_IPV6: ::c_int = 41; -pub const IP_MULTICAST_TTL: ::c_int = 33; -pub const IP_MULTICAST_LOOP: ::c_int = 34; -pub const IP_TTL: ::c_int = 2; -pub const IP_HDRINCL: ::c_int = 3; -pub const IP_ADD_MEMBERSHIP: ::c_int = 35; -pub const IP_DROP_MEMBERSHIP: ::c_int = 36; -pub const IP_TRANSPARENT: ::c_int = 19; -pub const IPV6_ADD_MEMBERSHIP: ::c_int = 20; -pub const IPV6_DROP_MEMBERSHIP: ::c_int = 21; - -pub const TCP_NODELAY: ::c_int = 1; -pub const TCP_MAXSEG: ::c_int = 2; -pub const TCP_CORK: ::c_int = 3; -pub const TCP_KEEPIDLE: ::c_int = 4; -pub const TCP_KEEPINTVL: ::c_int = 5; -pub const TCP_KEEPCNT: ::c_int = 6; -pub const TCP_SYNCNT: ::c_int = 7; -pub const TCP_LINGER2: ::c_int = 8; -pub const TCP_DEFER_ACCEPT: ::c_int = 9; -pub const TCP_WINDOW_CLAMP: ::c_int = 10; -pub const TCP_INFO: ::c_int = 11; -pub const TCP_QUICKACK: ::c_int = 12; -pub const TCP_CONGESTION: ::c_int = 13; - -pub const IPV6_MULTICAST_LOOP: ::c_int = 19; -pub const IPV6_V6ONLY: ::c_int = 26; - -pub const SO_DEBUG: ::c_int = 1; - -pub const MSG_NOSIGNAL: ::c_int = 0x4000; - -pub const SHUT_RD: ::c_int = 0; -pub const SHUT_WR: ::c_int = 1; -pub const SHUT_RDWR: ::c_int = 2; - -pub const LOCK_SH: ::c_int = 1; -pub const LOCK_EX: ::c_int = 2; -pub const LOCK_NB: ::c_int = 4; -pub const LOCK_UN: ::c_int = 8; - -pub const SA_NODEFER: ::c_int = 0x40000000; -pub const SA_RESETHAND: ::c_int = 0x80000000; -pub const SA_RESTART: ::c_int = 0x10000000; -pub const SA_NOCLDSTOP: ::c_int = 0x00000001; - -pub const SS_ONSTACK: ::c_int = 1; -pub const SS_DISABLE: ::c_int = 2; - -pub const PATH_MAX: ::c_int = 4096; - -pub 
const FD_SETSIZE: usize = 1024; - -pub const EPOLLIN: ::c_int = 0x1; -pub const EPOLLPRI: ::c_int = 0x2; -pub const EPOLLOUT: ::c_int = 0x4; -pub const EPOLLRDNORM: ::c_int = 0x40; -pub const EPOLLRDBAND: ::c_int = 0x80; -pub const EPOLLWRNORM: ::c_int = 0x100; -pub const EPOLLWRBAND: ::c_int = 0x200; -pub const EPOLLMSG: ::c_int = 0x400; -pub const EPOLLERR: ::c_int = 0x8; -pub const EPOLLHUP: ::c_int = 0x10; -pub const EPOLLET: ::c_int = 0x80000000; - -pub const EPOLL_CTL_ADD: ::c_int = 1; -pub const EPOLL_CTL_MOD: ::c_int = 3; -pub const EPOLL_CTL_DEL: ::c_int = 2; - -pub const EPOLL_CLOEXEC: ::c_int = 0x80000; - -pub const MNT_DETACH: ::c_int = 0x2; -pub const MNT_EXPIRE: ::c_int = 0x4; - -pub const Q_GETFMT: ::c_int = 0x800004; -pub const Q_GETINFO: ::c_int = 0x800005; -pub const Q_SETINFO: ::c_int = 0x800006; -pub const QIF_BLIMITS: ::uint32_t = 1; -pub const QIF_SPACE: ::uint32_t = 2; -pub const QIF_ILIMITS: ::uint32_t = 4; -pub const QIF_INODES: ::uint32_t = 8; -pub const QIF_BTIME: ::uint32_t = 16; -pub const QIF_ITIME: ::uint32_t = 32; -pub const QIF_LIMITS: ::uint32_t = 5; -pub const QIF_USAGE: ::uint32_t = 10; -pub const QIF_TIMES: ::uint32_t = 48; -pub const QIF_ALL: ::uint32_t = 63; - -pub const EFD_CLOEXEC: ::c_int = 0x80000; - -pub const MNT_FORCE: ::c_int = 0x1; - -pub const Q_SYNC: ::c_int = 0x800001; -pub const Q_QUOTAON: ::c_int = 0x800002; -pub const Q_QUOTAOFF: ::c_int = 0x800003; -pub const Q_GETQUOTA: ::c_int = 0x800007; -pub const Q_SETQUOTA: ::c_int = 0x800008; - -pub const TCIOFF: ::c_int = 2; -pub const TCION: ::c_int = 3; -pub const TCOOFF: ::c_int = 0; -pub const TCOON: ::c_int = 1; -pub const TCIFLUSH: ::c_int = 0; -pub const TCOFLUSH: ::c_int = 1; -pub const TCIOFLUSH: ::c_int = 2; -pub const NL0: ::c_int = 0x00000000; -pub const NL1: ::c_int = 0x00000100; -pub const TAB0: ::c_int = 0x00000000; -pub const CR0: ::c_int = 0x00000000; -pub const FF0: ::c_int = 0x00000000; -pub const BS0: ::c_int = 0x00000000; -pub const VT0: ::c_int = 
0x00000000; -pub const VERASE: usize = 2; -pub const VKILL: usize = 3; -pub const VINTR: usize = 0; -pub const VQUIT: usize = 1; -pub const VLNEXT: usize = 15; -pub const IGNBRK: ::tcflag_t = 0x00000001; -pub const BRKINT: ::tcflag_t = 0x00000002; -pub const IGNPAR: ::tcflag_t = 0x00000004; -pub const PARMRK: ::tcflag_t = 0x00000008; -pub const INPCK: ::tcflag_t = 0x00000010; -pub const ISTRIP: ::tcflag_t = 0x00000020; -pub const INLCR: ::tcflag_t = 0x00000040; -pub const IGNCR: ::tcflag_t = 0x00000080; -pub const ICRNL: ::tcflag_t = 0x00000100; -pub const IXANY: ::tcflag_t = 0x00000800; -pub const IMAXBEL: ::tcflag_t = 0x00002000; -pub const OPOST: ::tcflag_t = 0x1; -pub const CS5: ::tcflag_t = 0x00000000; -pub const CRTSCTS: ::tcflag_t = 0x80000000; -pub const ECHO: ::tcflag_t = 0x00000008; - -pub const CLONE_VM: ::c_int = 0x100; -pub const CLONE_FS: ::c_int = 0x200; -pub const CLONE_FILES: ::c_int = 0x400; -pub const CLONE_SIGHAND: ::c_int = 0x800; -pub const CLONE_PTRACE: ::c_int = 0x2000; -pub const CLONE_VFORK: ::c_int = 0x4000; -pub const CLONE_PARENT: ::c_int = 0x8000; -pub const CLONE_THREAD: ::c_int = 0x10000; -pub const CLONE_NEWNS: ::c_int = 0x20000; -pub const CLONE_SYSVSEM: ::c_int = 0x40000; -pub const CLONE_SETTLS: ::c_int = 0x80000; -pub const CLONE_PARENT_SETTID: ::c_int = 0x100000; -pub const CLONE_CHILD_CLEARTID: ::c_int = 0x200000; -pub const CLONE_DETACHED: ::c_int = 0x400000; -pub const CLONE_UNTRACED: ::c_int = 0x800000; -pub const CLONE_CHILD_SETTID: ::c_int = 0x01000000; -pub const CLONE_NEWUTS: ::c_int = 0x04000000; -pub const CLONE_NEWIPC: ::c_int = 0x08000000; -pub const CLONE_NEWUSER: ::c_int = 0x10000000; -pub const CLONE_NEWPID: ::c_int = 0x20000000; -pub const CLONE_NEWNET: ::c_int = 0x40000000; -pub const CLONE_IO: ::c_int = 0x80000000; - -pub const WNOHANG: ::c_int = 0x00000001; -pub const WUNTRACED: ::c_int = 0x00000002; -pub const WSTOPPED: ::c_int = WUNTRACED; -pub const WEXITED: ::c_int = 0x00000004; -pub const WCONTINUED: 
::c_int = 0x00000008; -pub const WNOWAIT: ::c_int = 0x01000000; - -pub const __WNOTHREAD: ::c_int = 0x20000000; -pub const __WALL: ::c_int = 0x40000000; -pub const __WCLONE: ::c_int = 0x80000000; - -pub const SPLICE_F_MOVE: ::c_uint = 0x01; -pub const SPLICE_F_NONBLOCK: ::c_uint = 0x02; -pub const SPLICE_F_MORE: ::c_uint = 0x04; -pub const SPLICE_F_GIFT: ::c_uint = 0x08; - -pub const RTLD_LOCAL: ::c_int = 0; - -pub const POSIX_FADV_NORMAL: ::c_int = 0; -pub const POSIX_FADV_RANDOM: ::c_int = 1; -pub const POSIX_FADV_SEQUENTIAL: ::c_int = 2; -pub const POSIX_FADV_WILLNEED: ::c_int = 3; - -pub const AT_FDCWD: ::c_int = -100; -pub const AT_SYMLINK_NOFOLLOW: ::c_int = 0x100; - -pub const LOG_CRON: ::c_int = 9 << 3; -pub const LOG_AUTHPRIV: ::c_int = 10 << 3; -pub const LOG_FTP: ::c_int = 11 << 3; -pub const LOG_PERROR: ::c_int = 0x20; - -pub const PIPE_BUF: usize = 4096; - -pub const SI_LOAD_SHIFT: ::c_uint = 16; - -pub const SIGEV_SIGNAL: ::c_int = 0; -pub const SIGEV_NONE: ::c_int = 1; -pub const SIGEV_THREAD: ::c_int = 2; - -f! 
{ - pub fn FD_CLR(fd: ::c_int, set: *mut fd_set) -> () { - let fd = fd as usize; - let size = mem::size_of_val(&(*set).fds_bits[0]) * 8; - (*set).fds_bits[fd / size] &= !(1 << (fd % size)); - return - } - - pub fn FD_ISSET(fd: ::c_int, set: *mut fd_set) -> bool { - let fd = fd as usize; - let size = mem::size_of_val(&(*set).fds_bits[0]) * 8; - return ((*set).fds_bits[fd / size] & (1 << (fd % size))) != 0 - } - - pub fn FD_SET(fd: ::c_int, set: *mut fd_set) -> () { - let fd = fd as usize; - let size = mem::size_of_val(&(*set).fds_bits[0]) * 8; - (*set).fds_bits[fd / size] |= 1 << (fd % size); - return - } - - pub fn FD_ZERO(set: *mut fd_set) -> () { - for slot in (*set).fds_bits.iter_mut() { - *slot = 0; - } - } - - pub fn WIFSTOPPED(status: ::c_int) -> bool { - (status & 0xff) == 0x7f - } - - pub fn WSTOPSIG(status: ::c_int) -> ::c_int { - (status >> 8) & 0xff - } - - pub fn WIFSIGNALED(status: ::c_int) -> bool { - (status & 0x7f) + 1 >= 2 - } - - pub fn WTERMSIG(status: ::c_int) -> ::c_int { - status & 0x7f - } - - pub fn WIFEXITED(status: ::c_int) -> bool { - (status & 0x7f) == 0 - } - - pub fn WEXITSTATUS(status: ::c_int) -> ::c_int { - (status >> 8) & 0xff - } - - pub fn WCOREDUMP(status: ::c_int) -> bool { - (status & 0x80) != 0 - } -} - -extern { - pub fn getpwnam_r(name: *const ::c_char, - pwd: *mut passwd, - buf: *mut ::c_char, - buflen: ::size_t, - result: *mut *mut passwd) -> ::c_int; - pub fn getpwuid_r(uid: ::uid_t, - pwd: *mut passwd, - buf: *mut ::c_char, - buflen: ::size_t, - result: *mut *mut passwd) -> ::c_int; - pub fn fdatasync(fd: ::c_int) -> ::c_int; - pub fn mincore(addr: *mut ::c_void, len: ::size_t, - vec: *mut ::c_uchar) -> ::c_int; - pub fn clock_getres(clk_id: clockid_t, tp: *mut ::timespec) -> ::c_int; - pub fn clock_gettime(clk_id: clockid_t, tp: *mut ::timespec) -> ::c_int; - pub fn clock_nanosleep(clk_id: clockid_t, - flags: ::c_int, - rqtp: *const ::timespec, - rmtp: *mut ::timespec) -> ::c_int; - pub fn prctl(option: ::c_int, ...) 
-> ::c_int; - pub fn pthread_getattr_np(native: ::pthread_t, - attr: *mut ::pthread_attr_t) -> ::c_int; - pub fn pthread_attr_getguardsize(attr: *const ::pthread_attr_t, - guardsize: *mut ::size_t) -> ::c_int; - pub fn pthread_attr_getstack(attr: *const ::pthread_attr_t, - stackaddr: *mut *mut ::c_void, - stacksize: *mut ::size_t) -> ::c_int; - pub fn memalign(align: ::size_t, size: ::size_t) -> *mut ::c_void; - pub fn setgroups(ngroups: ::size_t, - ptr: *const ::gid_t) -> ::c_int; - pub fn sched_setscheduler(pid: ::pid_t, - policy: ::c_int, - param: *const sched_param) -> ::c_int; - pub fn sched_getscheduler(pid: ::pid_t) -> ::c_int; - pub fn sched_get_priority_max(policy: ::c_int) -> ::c_int; - pub fn sched_get_priority_min(policy: ::c_int) -> ::c_int; - pub fn epoll_create(size: ::c_int) -> ::c_int; - pub fn epoll_create1(flags: ::c_int) -> ::c_int; - pub fn epoll_ctl(epfd: ::c_int, - op: ::c_int, - fd: ::c_int, - event: *mut epoll_event) -> ::c_int; - pub fn epoll_wait(epfd: ::c_int, - events: *mut epoll_event, - maxevents: ::c_int, - timeout: ::c_int) -> ::c_int; - pub fn pipe2(fds: *mut ::c_int, flags: ::c_int) -> ::c_int; - pub fn mount(src: *const ::c_char, - target: *const ::c_char, - fstype: *const ::c_char, - flags: ::c_ulong, - data: *const ::c_void) -> ::c_int; - pub fn umount(target: *const ::c_char) -> ::c_int; - pub fn umount2(target: *const ::c_char, flags: ::c_int) -> ::c_int; - pub fn clone(cb: extern fn(*mut ::c_void) -> ::c_int, - child_stack: *mut ::c_void, - flags: ::c_int, - arg: *mut ::c_void, ...) -> ::c_int; - pub fn statfs(path: *const ::c_char, buf: *mut statfs) -> ::c_int; - pub fn fstatfs(fd: ::c_int, buf: *mut statfs) -> ::c_int; - pub fn memrchr(cx: *const ::c_void, - c: ::c_int, - n: ::size_t) -> *mut ::c_void; - pub fn syscall(num: ::c_long, ...) 
-> ::c_long; - pub fn sendfile(out_fd: ::c_int, - in_fd: ::c_int, - offset: *mut off_t, - count: ::size_t) -> ::ssize_t; - pub fn splice(fd_in: ::c_int, - off_in: *mut ::loff_t, - fd_out: ::c_int, - off_out: *mut ::loff_t, - len: ::size_t, - flags: ::c_uint) -> ::ssize_t; - pub fn tee(fd_in: ::c_int, - fd_out: ::c_int, - len: ::size_t, - flags: ::c_uint) -> ::ssize_t; - pub fn vmsplice(fd: ::c_int, - iov: *const ::iovec, - nr_segs: ::size_t, - flags: ::c_uint) -> ::ssize_t; - - pub fn posix_fadvise(fd: ::c_int, offset: ::off_t, len: ::off_t, - advise: ::c_int) -> ::c_int; - pub fn futimens(fd: ::c_int, times: *const ::timespec) -> ::c_int; - pub fn utimensat(dirfd: ::c_int, path: *const ::c_char, - times: *const ::timespec, flag: ::c_int) -> ::c_int; - pub fn duplocale(base: ::locale_t) -> ::locale_t; - pub fn freelocale(loc: ::locale_t); - pub fn newlocale(mask: ::c_int, - locale: *const ::c_char, - base: ::locale_t) -> ::locale_t; - pub fn uselocale(loc: ::locale_t) -> ::locale_t; - pub fn creat64(path: *const c_char, mode: mode_t) -> ::c_int; - pub fn fstat64(fildes: ::c_int, buf: *mut stat64) -> ::c_int; - pub fn ftruncate64(fd: ::c_int, length: off64_t) -> ::c_int; - pub fn getrlimit64(resource: ::c_int, rlim: *mut rlimit64) -> ::c_int; - pub fn lseek64(fd: ::c_int, offset: off64_t, whence: ::c_int) -> off64_t; - pub fn lstat64(path: *const c_char, buf: *mut stat64) -> ::c_int; - pub fn mmap64(addr: *mut ::c_void, - len: ::size_t, - prot: ::c_int, - flags: ::c_int, - fd: ::c_int, - offset: off64_t) - -> *mut ::c_void; - pub fn open64(path: *const c_char, oflag: ::c_int, ...) 
-> ::c_int; - pub fn pread64(fd: ::c_int, buf: *mut ::c_void, count: ::size_t, - offset: off64_t) -> ::ssize_t; - pub fn pwrite64(fd: ::c_int, buf: *const ::c_void, count: ::size_t, - offset: off64_t) -> ::ssize_t; - pub fn readdir64_r(dirp: *mut ::DIR, entry: *mut ::dirent64, - result: *mut *mut ::dirent64) -> ::c_int; - pub fn setrlimit64(resource: ::c_int, rlim: *const rlimit64) -> ::c_int; - pub fn stat64(path: *const c_char, buf: *mut stat64) -> ::c_int; - pub fn eventfd(init: ::c_uint, flags: ::c_int) -> ::c_int; - pub fn sysinfo (info: *mut ::sysinfo) -> ::c_int; - - pub fn openat(dirfd: ::c_int, pathname: *const ::c_char, - flags: ::c_int, ...) -> ::c_int; - pub fn faccessat(dirfd: ::c_int, pathname: *const ::c_char, - mode: ::c_int, flags: ::c_int) -> ::c_int; - pub fn fchmodat(dirfd: ::c_int, pathname: *const ::c_char, - mode: ::mode_t, flags: ::c_int) -> ::c_int; - pub fn fchownat(dirfd: ::c_int, pathname: *const ::c_char, - owner: ::uid_t, group: ::gid_t, - flags: ::c_int) -> ::c_int; - pub fn fstatat(dirfd: ::c_int, pathname: *const ::c_char, - buf: *mut stat, flags: ::c_int) -> ::c_int; - pub fn linkat(olddirfd: ::c_int, oldpath: *const ::c_char, - newdirfd: ::c_int, newpath: *const ::c_char, - flags: ::c_int) -> ::c_int; - pub fn mkdirat(dirfd: ::c_int, pathname: *const ::c_char, - mode: ::mode_t) -> ::c_int; - pub fn mknodat(dirfd: ::c_int, pathname: *const ::c_char, - mode: ::mode_t, dev: dev_t) -> ::c_int; - pub fn readlinkat(dirfd: ::c_int, pathname: *const ::c_char, - buf: *mut ::c_char, bufsiz: ::size_t) -> ::ssize_t; - pub fn renameat(olddirfd: ::c_int, oldpath: *const ::c_char, - newdirfd: ::c_int, newpath: *const ::c_char) - -> ::c_int; - pub fn symlinkat(target: *const ::c_char, newdirfd: ::c_int, - linkpath: *const ::c_char) -> ::c_int; - pub fn unlinkat(dirfd: ::c_int, pathname: *const ::c_char, - flags: ::c_int) -> ::c_int; - pub fn pthread_condattr_getclock(attr: *const pthread_condattr_t, - clock_id: *mut clockid_t) -> ::c_int; - pub 
fn pthread_condattr_setclock(attr: *mut pthread_condattr_t, - clock_id: clockid_t) -> ::c_int; - pub fn sched_getaffinity(pid: ::pid_t, - cpusetsize: ::size_t, - cpuset: *mut cpu_set_t) -> ::c_int; - pub fn sched_setaffinity(pid: ::pid_t, - cpusetsize: ::size_t, - cpuset: *const cpu_set_t) -> ::c_int; - pub fn unshare(flags: ::c_int) -> ::c_int; - pub fn setns(fd: ::c_int, nstype: ::c_int) -> ::c_int; - pub fn sem_timedwait(sem: *mut sem_t, - abstime: *const ::timespec) -> ::c_int; - pub fn accept4(fd: ::c_int, addr: *mut ::sockaddr, len: *mut ::socklen_t, - flg: ::c_int) -> ::c_int; - pub fn pthread_mutex_timedlock(lock: *mut pthread_mutex_t, - abstime: *const ::timespec) -> ::c_int; - pub fn ptsname_r(fd: ::c_int, - buf: *mut ::c_char, - buflen: ::size_t) -> ::c_int; -} - -cfg_if! { - if #[cfg(any(target_os = "linux", - target_os = "emscripten", - target_os = "fuchsia"))] { - mod linux; - pub use self::linux::*; - } else if #[cfg(target_os = "android")] { - mod android; - pub use self::android::*; - } else { - // Unknown target_os - } -} diff -Nru cargo-0.17.0/vendor/libc-0.2.18/src/unix/solaris/mod.rs cargo-0.19.0/vendor/libc-0.2.18/src/unix/solaris/mod.rs --- cargo-0.17.0/vendor/libc-0.2.18/src/unix/solaris/mod.rs 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/libc-0.2.18/src/unix/solaris/mod.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,1048 +0,0 @@ -pub type c_char = i8; -pub type c_long = i64; -pub type c_ulong = u64; -pub type clockid_t = ::c_int; - -pub type blkcnt_t = i64; -pub type clock_t = i64; -pub type daddr_t = i64; -pub type dev_t = u64; -pub type fsblkcnt_t = u64; -pub type fsfilcnt_t = u64; -pub type ino_t = i64; -pub type key_t = i32; -pub type major_t = u32; -pub type minor_t = u32; -pub type mode_t = u32; -pub type nlink_t = u32; -pub type rlim_t = u64; -pub type speed_t = u32; -pub type tcflag_t = u32; -pub type time_t = i64; -pub type wchar_t = i32; -pub type nfds_t = ::c_ulong; - -pub type suseconds_t = ::c_long; -pub type off_t 
= i64; -pub type useconds_t = ::c_uint; -pub type socklen_t = u32; -pub type sa_family_t = u8; -pub type pthread_t = ::uintptr_t; -pub type pthread_key_t = ::c_uint; -pub type blksize_t = u32; -pub type fflags_t = u32; -pub type nl_item = ::c_int; - -pub enum timezone {} - -s! { - pub struct sockaddr { - pub sa_family: sa_family_t, - pub sa_data: [::c_char; 14], - } - - pub struct sockaddr_in { - pub sin_family: sa_family_t, - pub sin_port: ::in_port_t, - pub sin_addr: ::in_addr, - pub sin_zero: [::c_char; 8] - } - - pub struct sockaddr_in6 { - pub sin6_family: sa_family_t, - pub sin6_port: ::in_port_t, - pub sin6_flowinfo: u32, - pub sin6_addr: ::in6_addr, - pub sin6_scope_id: u32, - pub __sin6_src_id: u32 - } - - pub struct sockaddr_un { - pub sun_family: sa_family_t, - pub sun_path: [c_char; 108] - } - - pub struct passwd { - pub pw_name: *mut ::c_char, - pub pw_passwd: *mut ::c_char, - pub pw_uid: ::uid_t, - pub pw_gid: ::gid_t, - pub pw_age: *mut ::c_char, - pub pw_comment: *mut ::c_char, - pub pw_gecos: *mut ::c_char, - pub pw_dir: *mut ::c_char, - pub pw_shell: *mut ::c_char - } - - pub struct ifaddrs { - pub ifa_next: *mut ifaddrs, - pub ifa_name: *mut ::c_char, - pub ifa_flags: ::c_ulong, - pub ifa_addr: *mut ::sockaddr, - pub ifa_netmask: *mut ::sockaddr, - pub ifa_dstaddr: *mut ::sockaddr, - pub ifa_data: *mut ::c_void - } - - pub struct tm { - pub tm_sec: ::c_int, - pub tm_min: ::c_int, - pub tm_hour: ::c_int, - pub tm_mday: ::c_int, - pub tm_mon: ::c_int, - pub tm_year: ::c_int, - pub tm_wday: ::c_int, - pub tm_yday: ::c_int, - pub tm_isdst: ::c_int - } - - pub struct utsname { - pub sysname: [::c_char; 257], - pub nodename: [::c_char; 257], - pub release: [::c_char; 257], - pub version: [::c_char; 257], - pub machine: [::c_char; 257], - } - - pub struct msghdr { - pub msg_name: *mut ::c_void, - pub msg_namelen: ::socklen_t, - pub msg_iov: *mut ::iovec, - pub msg_iovlen: ::c_int, - pub msg_control: *mut ::c_void, - pub msg_controllen: ::socklen_t, - 
pub msg_flags: ::c_int, - } - - pub struct fd_set { - fds_bits: [i32; FD_SETSIZE / 32], - } - - pub struct pthread_attr_t { - __pthread_attrp: *mut ::c_void - } - - pub struct pthread_mutex_t { - __pthread_mutex_flag1: u16, - __pthread_mutex_flag2: u8, - __pthread_mutex_ceiling: u8, - __pthread_mutex_type: u16, - __pthread_mutex_magic: u16, - __pthread_mutex_lock: u64, - __pthread_mutex_data: u64 - } - - pub struct pthread_mutexattr_t { - __pthread_mutexattrp: *mut ::c_void - } - - pub struct pthread_cond_t { - __pthread_cond_flag: [u8; 4], - __pthread_cond_type: u16, - __pthread_cond_magic: u16, - __pthread_cond_data: u64 - } - - pub struct pthread_condattr_t { - __pthread_condattrp: *mut ::c_void, - } - - pub struct pthread_rwlock_t { - __pthread_rwlock_readers: i32, - __pthread_rwlock_type: u16, - __pthread_rwlock_magic: u16, - __pthread_rwlock_mutex: ::pthread_mutex_t, - __pthread_rwlock_readercv: ::pthread_cond_t, - __pthread_rwlock_writercv: ::pthread_cond_t - } - - pub struct dirent { - pub d_ino: ::ino_t, - pub d_off: ::off_t, - pub d_reclen: u16, - pub d_name: [::c_char; 1] - } - - pub struct glob_t { - pub gl_pathc: ::size_t, - pub gl_pathv: *mut *mut ::c_char, - pub gl_offs: ::size_t, - __unused1: *mut ::c_void, - __unused2: ::c_int, - __unused3: ::c_int, - __unused4: ::c_int, - __unused5: *mut ::c_void, - __unused6: *mut ::c_void, - __unused7: *mut ::c_void, - __unused8: *mut ::c_void, - __unused9: *mut ::c_void, - __unused10: *mut ::c_void, - } - - pub struct sockaddr_storage { - pub ss_family: ::sa_family_t, - __ss_pad1: [u8; 6], - __ss_align: i64, - __ss_pad2: [u8; 240], - } - - pub struct addrinfo { - pub ai_flags: ::c_int, - pub ai_family: ::c_int, - pub ai_socktype: ::c_int, - pub ai_protocol: ::c_int, - pub ai_addrlen: ::socklen_t, - pub ai_canonname: *mut ::c_char, - pub ai_addr: *mut ::sockaddr, - pub ai_next: *mut addrinfo, - } - - pub struct sigset_t { - bits: [u32; 4], - } - - pub struct siginfo_t { - pub si_signo: ::c_int, - pub si_code: 
::c_int, - pub si_errno: ::c_int, - pub si_pad: ::c_int, - pub si_addr: *mut ::c_void, - __pad: [u8; 232], - } - - pub struct sigaction { - pub sa_flags: ::c_int, - pub sa_sigaction: ::sighandler_t, - pub sa_mask: sigset_t, - } - - pub struct stack_t { - pub ss_sp: *mut ::c_void, - pub ss_size: ::size_t, - pub ss_flags: ::c_int, - } - - pub struct statvfs { - pub f_bsize: ::c_ulong, - pub f_frsize: ::c_ulong, - pub f_blocks: ::fsblkcnt_t, - pub f_bfree: ::fsblkcnt_t, - pub f_bavail: ::fsblkcnt_t, - pub f_files: ::fsfilcnt_t, - pub f_ffree: ::fsfilcnt_t, - pub f_favail: ::fsfilcnt_t, - pub f_fsid: ::c_ulong, - pub f_basetype: [::c_char; 16], - pub f_flag: ::c_ulong, - pub f_namemax: ::c_ulong, - pub f_fstr: [::c_char; 32] - } - - pub struct sched_param { - pub sched_priority: ::c_int, - sched_pad: [::c_int; 8] - } - - pub struct Dl_info { - pub dli_fname: *const ::c_char, - pub dli_fbase: *mut ::c_void, - pub dli_sname: *const ::c_char, - pub dli_saddr: *mut ::c_void, - } - - pub struct stat { - pub st_dev: ::dev_t, - pub st_ino: ::ino_t, - pub st_mode: ::mode_t, - pub st_nlink: ::nlink_t, - pub st_uid: ::uid_t, - pub st_gid: ::gid_t, - pub st_rdev: ::dev_t, - pub st_size: ::off_t, - pub st_atime: ::time_t, - pub st_atime_nsec: ::c_long, - pub st_mtime: ::time_t, - pub st_mtime_nsec: ::c_long, - pub st_ctime: ::time_t, - pub st_ctime_nsec: ::c_long, - pub st_blksize: ::blksize_t, - pub st_blocks: ::blkcnt_t, - __unused: [::c_char; 16] - } - - pub struct termios { - pub c_iflag: ::tcflag_t, - pub c_oflag: ::tcflag_t, - pub c_cflag: ::tcflag_t, - pub c_lflag: ::tcflag_t, - pub c_cc: [::cc_t; ::NCCS] - } - - pub struct lconv { - pub decimal_point: *mut ::c_char, - pub thousands_sep: *mut ::c_char, - pub grouping: *mut ::c_char, - pub int_curr_symbol: *mut ::c_char, - pub currency_symbol: *mut ::c_char, - pub mon_decimal_point: *mut ::c_char, - pub mon_thousands_sep: *mut ::c_char, - pub mon_grouping: *mut ::c_char, - pub positive_sign: *mut ::c_char, - pub 
negative_sign: *mut ::c_char, - pub int_frac_digits: ::c_char, - pub frac_digits: ::c_char, - pub p_cs_precedes: ::c_char, - pub p_sep_by_space: ::c_char, - pub n_cs_precedes: ::c_char, - pub n_sep_by_space: ::c_char, - pub p_sign_posn: ::c_char, - pub n_sign_posn: ::c_char, - pub int_p_cs_precedes: ::c_char, - pub int_p_sep_by_space: ::c_char, - pub int_n_cs_precedes: ::c_char, - pub int_n_sep_by_space: ::c_char, - pub int_p_sign_posn: ::c_char, - pub int_n_sign_posn: ::c_char, - } - - pub struct sem_t { - pub sem_count: u32, - pub sem_type: u16, - pub sem_magic: u16, - pub sem_pad1: [u64; 3], - pub sem_pad2: [u64; 2] - } - - pub struct flock { - pub l_type: ::c_short, - pub l_whence: ::c_short, - pub l_start: ::off_t, - pub l_len: ::off_t, - pub l_sysid: ::c_int, - pub l_pid: ::pid_t, - pub l_pad: [::c_long; 4] - } - - pub struct if_nameindex { - pub if_index: ::c_uint, - pub if_name: *mut ::c_char, - } -} - -pub const LC_CTYPE: ::c_int = 0; -pub const LC_NUMERIC: ::c_int = 1; -pub const LC_TIME: ::c_int = 2; -pub const LC_COLLATE: ::c_int = 3; -pub const LC_MONETARY: ::c_int = 4; -pub const LC_MESSAGES: ::c_int = 5; -pub const LC_ALL: ::c_int = 6; -pub const LC_CTYPE_MASK: ::c_int = (1 << LC_CTYPE); -pub const LC_NUMERIC_MASK: ::c_int = (1 << LC_NUMERIC); -pub const LC_TIME_MASK: ::c_int = (1 << LC_TIME); -pub const LC_COLLATE_MASK: ::c_int = (1 << LC_COLLATE); -pub const LC_MONETARY_MASK: ::c_int = (1 << LC_MONETARY); -pub const LC_MESSAGES_MASK: ::c_int = (1 << LC_MESSAGES); -pub const LC_ALL_MASK: ::c_int = LC_CTYPE_MASK - | LC_NUMERIC_MASK - | LC_TIME_MASK - | LC_COLLATE_MASK - | LC_MONETARY_MASK - | LC_MESSAGES_MASK; - -pub const DAY_1: ::nl_item = 1; -pub const DAY_2: ::nl_item = 2; -pub const DAY_3: ::nl_item = 3; -pub const DAY_4: ::nl_item = 4; -pub const DAY_5: ::nl_item = 5; -pub const DAY_6: ::nl_item = 6; -pub const DAY_7: ::nl_item = 7; - -pub const ABDAY_1: ::nl_item = 8; -pub const ABDAY_2: ::nl_item = 9; -pub const ABDAY_3: ::nl_item = 10; -pub 
const ABDAY_4: ::nl_item = 11; -pub const ABDAY_5: ::nl_item = 12; -pub const ABDAY_6: ::nl_item = 13; -pub const ABDAY_7: ::nl_item = 14; - -pub const MON_1: ::nl_item = 15; -pub const MON_2: ::nl_item = 16; -pub const MON_3: ::nl_item = 17; -pub const MON_4: ::nl_item = 18; -pub const MON_5: ::nl_item = 19; -pub const MON_6: ::nl_item = 20; -pub const MON_7: ::nl_item = 21; -pub const MON_8: ::nl_item = 22; -pub const MON_9: ::nl_item = 23; -pub const MON_10: ::nl_item = 24; -pub const MON_11: ::nl_item = 25; -pub const MON_12: ::nl_item = 26; - -pub const ABMON_1: ::nl_item = 27; -pub const ABMON_2: ::nl_item = 28; -pub const ABMON_3: ::nl_item = 29; -pub const ABMON_4: ::nl_item = 30; -pub const ABMON_5: ::nl_item = 31; -pub const ABMON_6: ::nl_item = 32; -pub const ABMON_7: ::nl_item = 33; -pub const ABMON_8: ::nl_item = 34; -pub const ABMON_9: ::nl_item = 35; -pub const ABMON_10: ::nl_item = 36; -pub const ABMON_11: ::nl_item = 37; -pub const ABMON_12: ::nl_item = 38; - -pub const RADIXCHAR: ::nl_item = 39; -pub const THOUSEP: ::nl_item = 40; -pub const YESSTR: ::nl_item = 41; -pub const NOSTR: ::nl_item = 42; -pub const CRNCYSTR: ::nl_item = 43; - -pub const D_T_FMT: ::nl_item = 44; -pub const D_FMT: ::nl_item = 45; -pub const T_FMT: ::nl_item = 46; -pub const AM_STR: ::nl_item = 47; -pub const PM_STR: ::nl_item = 48; - -pub const CODESET: ::nl_item = 49; -pub const T_FMT_AMPM: ::nl_item = 50; -pub const ERA: ::nl_item = 51; -pub const ERA_D_FMT: ::nl_item = 52; -pub const ERA_D_T_FMT: ::nl_item = 53; -pub const ERA_T_FMT: ::nl_item = 54; -pub const ALT_DIGITS: ::nl_item = 55; -pub const YESEXPR: ::nl_item = 56; -pub const NOEXPR: ::nl_item = 57; -pub const _DATE_FMT: ::nl_item = 58; -pub const MAXSTRMSG: ::nl_item = 58; - -pub const PATH_MAX: ::c_int = 1024; - -pub const SA_ONSTACK: ::c_int = 0x00000001; -pub const SA_RESETHAND: ::c_int = 0x00000002; -pub const SA_RESTART: ::c_int = 0x00000004; -pub const SA_SIGINFO: ::c_int = 0x00000008; -pub const 
SA_NODEFER: ::c_int = 0x00000010; -pub const SA_NOCLDWAIT: ::c_int = 0x00010000; -pub const SA_NOCLDSTOP: ::c_int = 0x00020000; - -pub const SS_ONSTACK: ::c_int = 1; -pub const SS_DISABLE: ::c_int = 2; - -pub const FIONBIO: ::c_int = 0x8004667e; - -pub const SIGCHLD: ::c_int = 18; -pub const SIGBUS: ::c_int = 10; -pub const SIGINFO: ::c_int = 41; -pub const SIG_BLOCK: ::c_int = 1; -pub const SIG_UNBLOCK: ::c_int = 2; -pub const SIG_SETMASK: ::c_int = 3; - -pub const IPV6_MULTICAST_LOOP: ::c_int = 0x8; -pub const IPV6_V6ONLY: ::c_int = 0x27; - -pub const FD_SETSIZE: usize = 1024; - -pub const ST_RDONLY: ::c_ulong = 1; -pub const ST_NOSUID: ::c_ulong = 2; - -pub const NI_MAXHOST: ::socklen_t = 1025; - -pub const EXIT_FAILURE: ::c_int = 1; -pub const EXIT_SUCCESS: ::c_int = 0; -pub const RAND_MAX: ::c_int = 32767; -pub const EOF: ::c_int = -1; -pub const SEEK_SET: ::c_int = 0; -pub const SEEK_CUR: ::c_int = 1; -pub const SEEK_END: ::c_int = 2; -pub const _IOFBF: ::c_int = 0; -pub const _IONBF: ::c_int = 4; -pub const _IOLBF: ::c_int = 64; -pub const BUFSIZ: ::c_uint = 1024; -pub const FOPEN_MAX: ::c_uint = 20; -pub const FILENAME_MAX: ::c_uint = 1024; -pub const L_tmpnam: ::c_uint = 25; -pub const TMP_MAX: ::c_uint = 17576; - -pub const O_RDONLY: ::c_int = 0; -pub const O_WRONLY: ::c_int = 1; -pub const O_RDWR: ::c_int = 2; -pub const O_APPEND: ::c_int = 8; -pub const O_CREAT: ::c_int = 256; -pub const O_EXCL: ::c_int = 1024; -pub const O_NOCTTY: ::c_int = 2048; -pub const O_TRUNC: ::c_int = 512; -pub const O_CLOEXEC: ::c_int = 0x800000; -pub const O_ACCMODE: ::c_int = 0x600003; -pub const S_IFIFO: mode_t = 4096; -pub const S_IFCHR: mode_t = 8192; -pub const S_IFBLK: mode_t = 24576; -pub const S_IFDIR: mode_t = 16384; -pub const S_IFREG: mode_t = 32768; -pub const S_IFLNK: mode_t = 40960; -pub const S_IFSOCK: mode_t = 49152; -pub const S_IFMT: mode_t = 61440; -pub const S_IEXEC: mode_t = 64; -pub const S_IWRITE: mode_t = 128; -pub const S_IREAD: mode_t = 256; -pub 
const S_IRWXU: mode_t = 448; -pub const S_IXUSR: mode_t = 64; -pub const S_IWUSR: mode_t = 128; -pub const S_IRUSR: mode_t = 256; -pub const S_IRWXG: mode_t = 56; -pub const S_IXGRP: mode_t = 8; -pub const S_IWGRP: mode_t = 16; -pub const S_IRGRP: mode_t = 32; -pub const S_IRWXO: mode_t = 7; -pub const S_IXOTH: mode_t = 1; -pub const S_IWOTH: mode_t = 2; -pub const S_IROTH: mode_t = 4; -pub const F_OK: ::c_int = 0; -pub const R_OK: ::c_int = 4; -pub const W_OK: ::c_int = 2; -pub const X_OK: ::c_int = 1; -pub const STDIN_FILENO: ::c_int = 0; -pub const STDOUT_FILENO: ::c_int = 1; -pub const STDERR_FILENO: ::c_int = 2; -pub const F_LOCK: ::c_int = 1; -pub const F_TEST: ::c_int = 3; -pub const F_TLOCK: ::c_int = 2; -pub const F_ULOCK: ::c_int = 0; -pub const F_DUPFD_CLOEXEC: ::c_int = 37; -pub const F_SETLK: ::c_int = 6; -pub const F_SETLKW: ::c_int = 7; -pub const F_GETLK: ::c_int = 14; -pub const SIGHUP: ::c_int = 1; -pub const SIGINT: ::c_int = 2; -pub const SIGQUIT: ::c_int = 3; -pub const SIGILL: ::c_int = 4; -pub const SIGABRT: ::c_int = 6; -pub const SIGEMT: ::c_int = 7; -pub const SIGFPE: ::c_int = 8; -pub const SIGKILL: ::c_int = 9; -pub const SIGSEGV: ::c_int = 11; -pub const SIGSYS: ::c_int = 12; -pub const SIGPIPE: ::c_int = 13; -pub const SIGALRM: ::c_int = 14; -pub const SIGTERM: ::c_int = 15; -pub const SIGUSR1: ::c_int = 16; -pub const SIGUSR2: ::c_int = 17; -pub const SIGPWR: ::c_int = 19; -pub const SIGWINCH: ::c_int = 20; -pub const SIGURG: ::c_int = 21; -pub const SIGPOLL: ::c_int = 22; -pub const SIGIO: ::c_int = SIGPOLL; -pub const SIGSTOP: ::c_int = 23; -pub const SIGTSTP: ::c_int = 24; -pub const SIGCONT: ::c_int = 25; -pub const SIGTTIN: ::c_int = 26; -pub const SIGTTOU: ::c_int = 27; -pub const SIGVTALRM: ::c_int = 28; -pub const SIGPROF: ::c_int = 29; -pub const SIGXCPU: ::c_int = 30; -pub const SIGXFSZ: ::c_int = 31; - -pub const WNOHANG: ::c_int = 0x40; -pub const WUNTRACED: ::c_int = 0x04; - -pub const PROT_NONE: ::c_int = 0; -pub const 
PROT_READ: ::c_int = 1; -pub const PROT_WRITE: ::c_int = 2; -pub const PROT_EXEC: ::c_int = 4; - -pub const MAP_SHARED: ::c_int = 0x0001; -pub const MAP_PRIVATE: ::c_int = 0x0002; -pub const MAP_FIXED: ::c_int = 0x0010; -pub const MAP_NORESERVE: ::c_int = 0x40; -pub const MAP_ANON: ::c_int = 0x0100; -pub const MAP_RENAME: ::c_int = 0x20; -pub const MAP_ALIGN: ::c_int = 0x200; -pub const MAP_TEXT: ::c_int = 0x400; -pub const MAP_INITDATA: ::c_int = 0x800; -pub const MAP_FAILED: *mut ::c_void = !0 as *mut ::c_void; - -pub const MCL_CURRENT: ::c_int = 0x0001; -pub const MCL_FUTURE: ::c_int = 0x0002; - -pub const MS_SYNC: ::c_int = 0x0004; -pub const MS_ASYNC: ::c_int = 0x0001; -pub const MS_INVALIDATE: ::c_int = 0x0002; -pub const MS_INVALCURPROC: ::c_int = 0x0008; - -pub const EPERM: ::c_int = 1; -pub const ENOENT: ::c_int = 2; -pub const ESRCH: ::c_int = 3; -pub const EINTR: ::c_int = 4; -pub const EIO: ::c_int = 5; -pub const ENXIO: ::c_int = 6; -pub const E2BIG: ::c_int = 7; -pub const ENOEXEC: ::c_int = 8; -pub const EBADF: ::c_int = 9; -pub const ECHILD: ::c_int = 10; -pub const EDEADLK: ::c_int = 45; -pub const ENOMEM: ::c_int = 12; -pub const EACCES: ::c_int = 13; -pub const EFAULT: ::c_int = 14; -pub const ENOTBLK: ::c_int = 15; -pub const EBUSY: ::c_int = 16; -pub const EEXIST: ::c_int = 17; -pub const EXDEV: ::c_int = 18; -pub const ENODEV: ::c_int = 19; -pub const ENOTDIR: ::c_int = 20; -pub const EISDIR: ::c_int = 21; -pub const EINVAL: ::c_int = 22; -pub const ENFILE: ::c_int = 23; -pub const EMFILE: ::c_int = 24; -pub const ENOTTY: ::c_int = 25; -pub const ETXTBSY: ::c_int = 26; -pub const EFBIG: ::c_int = 27; -pub const ENOSPC: ::c_int = 28; -pub const ESPIPE: ::c_int = 29; -pub const EROFS: ::c_int = 30; -pub const EMLINK: ::c_int = 31; -pub const EPIPE: ::c_int = 32; -pub const EDOM: ::c_int = 33; -pub const ERANGE: ::c_int = 34; -pub const ENOTSUP: ::c_int = 48; -pub const EAGAIN: ::c_int = 11; -pub const EWOULDBLOCK: ::c_int = 11; -pub const 
EINPROGRESS: ::c_int = 150; -pub const EALREADY: ::c_int = 149; -pub const ENOTSOCK: ::c_int = 95; -pub const EDESTADDRREQ: ::c_int = 96; -pub const EMSGSIZE: ::c_int = 97; -pub const EPROTOTYPE: ::c_int = 98; -pub const ENOPROTOOPT: ::c_int = 99; -pub const EPROTONOSUPPORT: ::c_int = 120; -pub const ESOCKTNOSUPPORT: ::c_int = 121; -pub const EOPNOTSUPP: ::c_int = 122; -pub const EPFNOSUPPORT: ::c_int = 123; -pub const EAFNOSUPPORT: ::c_int = 124; -pub const EADDRINUSE: ::c_int = 125; -pub const EADDRNOTAVAIL: ::c_int = 126; -pub const ENETDOWN: ::c_int = 127; -pub const ENETUNREACH: ::c_int = 128; -pub const ENETRESET: ::c_int = 129; -pub const ECONNABORTED: ::c_int = 130; -pub const ECONNRESET: ::c_int = 131; -pub const ENOBUFS: ::c_int = 132; -pub const EISCONN: ::c_int = 133; -pub const ENOTCONN: ::c_int = 134; -pub const ESHUTDOWN: ::c_int = 143; -pub const ETOOMANYREFS: ::c_int = 144; -pub const ETIMEDOUT: ::c_int = 145; -pub const ECONNREFUSED: ::c_int = 146; -pub const ELOOP: ::c_int = 90; -pub const ENAMETOOLONG: ::c_int = 78; -pub const EHOSTDOWN: ::c_int = 147; -pub const EHOSTUNREACH: ::c_int = 148; -pub const ENOTEMPTY: ::c_int = 93; -pub const EUSERS: ::c_int = 94; -pub const EDQUOT: ::c_int = 49; -pub const ESTALE: ::c_int = 151; -pub const EREMOTE: ::c_int = 66; -pub const ENOLCK: ::c_int = 46; -pub const ENOSYS: ::c_int = 89; -pub const EIDRM: ::c_int = 36; -pub const ENOMSG: ::c_int = 35; -pub const EOVERFLOW: ::c_int = 79; -pub const ECANCELED: ::c_int = 47; -pub const EILSEQ: ::c_int = 88; -pub const EBADMSG: ::c_int = 77; -pub const EMULTIHOP: ::c_int = 74; -pub const ENOLINK: ::c_int = 67; -pub const EPROTO: ::c_int = 71; - -pub const EAI_SYSTEM: ::c_int = 11; - -pub const F_DUPFD: ::c_int = 0; -pub const F_GETFD: ::c_int = 1; -pub const F_SETFD: ::c_int = 2; -pub const F_GETFL: ::c_int = 3; -pub const F_SETFL: ::c_int = 4; - -pub const SIGTRAP: ::c_int = 5; - -pub const GLOB_APPEND : ::c_int = 32; -pub const GLOB_DOOFFS : ::c_int = 16; -pub 
const GLOB_ERR : ::c_int = 1; -pub const GLOB_MARK : ::c_int = 2; -pub const GLOB_NOCHECK : ::c_int = 8; -pub const GLOB_NOSORT : ::c_int = 4; -pub const GLOB_NOESCAPE: ::c_int = 64; - -pub const GLOB_NOSPACE : ::c_int = -2; -pub const GLOB_ABORTED : ::c_int = -1; -pub const GLOB_NOMATCH : ::c_int = -3; - -pub const POSIX_MADV_NORMAL: ::c_int = 0; -pub const POSIX_MADV_RANDOM: ::c_int = 1; -pub const POSIX_MADV_SEQUENTIAL: ::c_int = 2; -pub const POSIX_MADV_WILLNEED: ::c_int = 3; -pub const POSIX_MADV_DONTNEED: ::c_int = 4; - -pub const _SC_IOV_MAX: ::c_int = 77; -pub const _SC_GETGR_R_SIZE_MAX: ::c_int = 569; -pub const _SC_GETPW_R_SIZE_MAX: ::c_int = 570; -pub const _SC_LOGIN_NAME_MAX: ::c_int = 571; -pub const _SC_MQ_PRIO_MAX: ::c_int = 30; -pub const _SC_THREAD_ATTR_STACKADDR: ::c_int = 577; -pub const _SC_THREAD_ATTR_STACKSIZE: ::c_int = 578; -pub const _SC_THREAD_DESTRUCTOR_ITERATIONS: ::c_int = 568; -pub const _SC_THREAD_KEYS_MAX: ::c_int = 572; -pub const _SC_THREAD_PRIO_INHERIT: ::c_int = 580; -pub const _SC_THREAD_PRIO_PROTECT: ::c_int = 581; -pub const _SC_THREAD_PRIORITY_SCHEDULING: ::c_int = 579; -pub const _SC_THREAD_PROCESS_SHARED: ::c_int = 582; -pub const _SC_THREAD_SAFE_FUNCTIONS: ::c_int = 583; -pub const _SC_THREAD_STACK_MIN: ::c_int = 573; -pub const _SC_THREAD_THREADS_MAX: ::c_int = 574; -pub const _SC_THREADS: ::c_int = 576; -pub const _SC_TTY_NAME_MAX: ::c_int = 575; -pub const _SC_ATEXIT_MAX: ::c_int = 76; -pub const _SC_XOPEN_CRYPT: ::c_int = 62; -pub const _SC_XOPEN_ENH_I18N: ::c_int = 63; -pub const _SC_XOPEN_LEGACY: ::c_int = 717; -pub const _SC_XOPEN_REALTIME: ::c_int = 718; -pub const _SC_XOPEN_REALTIME_THREADS: ::c_int = 719; -pub const _SC_XOPEN_SHM: ::c_int = 64; -pub const _SC_XOPEN_UNIX: ::c_int = 78; -pub const _SC_XOPEN_VERSION: ::c_int = 12; -pub const _SC_XOPEN_XCU_VERSION: ::c_int = 67; - -pub const PTHREAD_CREATE_JOINABLE: ::c_int = 0; -pub const PTHREAD_CREATE_DETACHED: ::c_int = 0x40; -pub const PTHREAD_PROCESS_SHARED: 
::c_int = 1; -pub const PTHREAD_PROCESS_PRIVATE: u16 = 0; -pub const PTHREAD_STACK_MIN: ::size_t = 4096; - -pub const SIGSTKSZ: ::size_t = 8192; - -// https://illumos.org/man/3c/clock_gettime -// https://github.com/illumos/illumos-gate/ -// blob/HEAD/usr/src/lib/libc/amd64/sys/__clock_gettime.s -// clock_gettime(3c) doesn't seem to accept anything other than CLOCK_REALTIME -// or __CLOCK_REALTIME0 -// -// https://github.com/illumos/illumos-gate/ -// blob/HEAD/usr/src/uts/common/sys/time_impl.h -// Confusing! CLOCK_HIGHRES==CLOCK_MONOTONIC==4 -// __CLOCK_REALTIME0==0 is an obsoleted version of CLOCK_REALTIME==3 -pub const CLOCK_REALTIME: clockid_t = 3; -pub const CLOCK_MONOTONIC: clockid_t = 4; -pub const TIMER_RELTIME: ::c_int = 0; -pub const TIMER_ABSTIME: ::c_int = 1; - -pub const RLIMIT_CPU: ::c_int = 0; -pub const RLIMIT_FSIZE: ::c_int = 1; -pub const RLIMIT_DATA: ::c_int = 2; -pub const RLIMIT_STACK: ::c_int = 3; -pub const RLIMIT_CORE: ::c_int = 4; -pub const RLIMIT_NOFILE: ::c_int = 5; -pub const RLIMIT_VMEM: ::c_int = 6; -pub const RLIMIT_AS: ::c_int = RLIMIT_VMEM; - -pub const RLIM_NLIMITS: rlim_t = 7; -pub const RLIM_INFINITY: rlim_t = 0x7fffffff; - -pub const RUSAGE_SELF: ::c_int = 0; -pub const RUSAGE_CHILDREN: ::c_int = -1; - -pub const MADV_NORMAL: ::c_int = 0; -pub const MADV_RANDOM: ::c_int = 1; -pub const MADV_SEQUENTIAL: ::c_int = 2; -pub const MADV_WILLNEED: ::c_int = 3; -pub const MADV_DONTNEED: ::c_int = 4; -pub const MADV_FREE: ::c_int = 5; - -pub const AF_INET: ::c_int = 2; -pub const AF_INET6: ::c_int = 26; -pub const AF_UNIX: ::c_int = 1; -pub const SOCK_DGRAM: ::c_int = 1; -pub const SOCK_STREAM: ::c_int = 2; -pub const SOCK_RAW: ::c_int = 4; -pub const SOCK_RDM: ::c_int = 5; -pub const SOCK_SEQPACKET: ::c_int = 6; -pub const IPPROTO_TCP: ::c_int = 6; -pub const IPPROTO_IP: ::c_int = 0; -pub const IPPROTO_IPV6: ::c_int = 41; -pub const IP_MULTICAST_TTL: ::c_int = 17; -pub const IP_MULTICAST_LOOP: ::c_int = 18; -pub const IP_TTL: ::c_int = 
4; -pub const IP_HDRINCL: ::c_int = 2; -pub const IP_ADD_MEMBERSHIP: ::c_int = 19; -pub const IP_DROP_MEMBERSHIP: ::c_int = 20; -pub const IPV6_JOIN_GROUP: ::c_int = 9; -pub const IPV6_LEAVE_GROUP: ::c_int = 10; - -pub const TCP_NODELAY: ::c_int = 1; -pub const TCP_KEEPIDLE: ::c_int = 34; -pub const SOL_SOCKET: ::c_int = 0xffff; -pub const SO_DEBUG: ::c_int = 0x01; -pub const SO_ACCEPTCONN: ::c_int = 0x0002; -pub const SO_REUSEADDR: ::c_int = 0x0004; -pub const SO_KEEPALIVE: ::c_int = 0x0008; -pub const SO_DONTROUTE: ::c_int = 0x0010; -pub const SO_BROADCAST: ::c_int = 0x0020; -pub const SO_USELOOPBACK: ::c_int = 0x0040; -pub const SO_LINGER: ::c_int = 0x0080; -pub const SO_OOBINLINE: ::c_int = 0x0100; -pub const SO_SNDBUF: ::c_int = 0x1001; -pub const SO_RCVBUF: ::c_int = 0x1002; -pub const SO_SNDLOWAT: ::c_int = 0x1003; -pub const SO_RCVLOWAT: ::c_int = 0x1004; -pub const SO_SNDTIMEO: ::c_int = 0x1005; -pub const SO_RCVTIMEO: ::c_int = 0x1006; -pub const SO_ERROR: ::c_int = 0x1007; -pub const SO_TYPE: ::c_int = 0x1008; - -pub const IFF_LOOPBACK: ::c_int = 0x8; - -pub const SHUT_RD: ::c_int = 0; -pub const SHUT_WR: ::c_int = 1; -pub const SHUT_RDWR: ::c_int = 2; - -pub const LOCK_SH: ::c_int = 1; -pub const LOCK_EX: ::c_int = 2; -pub const LOCK_NB: ::c_int = 4; -pub const LOCK_UN: ::c_int = 8; - -pub const O_SYNC: ::c_int = 16; -pub const O_NONBLOCK: ::c_int = 128; - -pub const IPPROTO_RAW: ::c_int = 255; - -pub const _SC_ARG_MAX: ::c_int = 1; -pub const _SC_CHILD_MAX: ::c_int = 2; -pub const _SC_CLK_TCK: ::c_int = 3; -pub const _SC_NGROUPS_MAX: ::c_int = 4; -pub const _SC_OPEN_MAX: ::c_int = 5; -pub const _SC_JOB_CONTROL: ::c_int = 6; -pub const _SC_SAVED_IDS: ::c_int = 7; -pub const _SC_VERSION: ::c_int = 8; -pub const _SC_PAGESIZE: ::c_int = 11; -pub const _SC_PAGE_SIZE: ::c_int = _SC_PAGESIZE; -pub const _SC_NPROCESSORS_ONLN: ::c_int = 15; -pub const _SC_STREAM_MAX: ::c_int = 16; -pub const _SC_TZNAME_MAX: ::c_int = 17; -pub const _SC_AIO_LISTIO_MAX: ::c_int = 
18; -pub const _SC_AIO_MAX: ::c_int = 19; -pub const _SC_BC_BASE_MAX: ::c_int = 54; -pub const _SC_BC_DIM_MAX: ::c_int = 55; -pub const _SC_BC_SCALE_MAX: ::c_int = 56; -pub const _SC_BC_STRING_MAX: ::c_int = 57; -pub const _SC_COLL_WEIGHTS_MAX: ::c_int = 58; -pub const _SC_EXPR_NEST_MAX: ::c_int = 59; -pub const _SC_LINE_MAX: ::c_int = 60; -pub const _SC_RE_DUP_MAX: ::c_int = 61; -pub const _SC_2_VERSION: ::c_int = 53; -pub const _SC_2_C_BIND: ::c_int = 45; -pub const _SC_2_C_DEV: ::c_int = 46; -pub const _SC_2_CHAR_TERM: ::c_int = 66; -pub const _SC_2_FORT_DEV: ::c_int = 48; -pub const _SC_2_FORT_RUN: ::c_int = 49; -pub const _SC_2_LOCALEDEF: ::c_int = 50; -pub const _SC_2_SW_DEV: ::c_int = 51; -pub const _SC_2_UPE: ::c_int = 52; -pub const _SC_ASYNCHRONOUS_IO: ::c_int = 21; -pub const _SC_MAPPED_FILES: ::c_int = 24; -pub const _SC_MEMLOCK: ::c_int = 25; -pub const _SC_MEMLOCK_RANGE: ::c_int = 26; -pub const _SC_MEMORY_PROTECTION: ::c_int = 27; -pub const _SC_MESSAGE_PASSING: ::c_int = 28; -pub const _SC_PRIORITIZED_IO: ::c_int = 31; -pub const _SC_PRIORITY_SCHEDULING: ::c_int = 32; -pub const _SC_REALTIME_SIGNALS: ::c_int = 33; -pub const _SC_SEMAPHORES: ::c_int = 35; -pub const _SC_FSYNC: ::c_int = 23; -pub const _SC_SHARED_MEMORY_OBJECTS: ::c_int = 38; -pub const _SC_SYNCHRONIZED_IO: ::c_int = 42; -pub const _SC_TIMERS: ::c_int = 43; -pub const _SC_AIO_PRIO_DELTA_MAX: ::c_int = 20; -pub const _SC_DELAYTIMER_MAX: ::c_int = 22; -pub const _SC_MQ_OPEN_MAX: ::c_int = 29; -pub const _SC_RTSIG_MAX: ::c_int = 34; -pub const _SC_SEM_NSEMS_MAX: ::c_int = 36; -pub const _SC_SEM_VALUE_MAX: ::c_int = 37; -pub const _SC_SIGQUEUE_MAX: ::c_int = 39; -pub const _SC_TIMER_MAX: ::c_int = 44; - -pub const _MUTEX_MAGIC: u16 = 0x4d58; // MX -pub const _COND_MAGIC: u16 = 0x4356; // CV -pub const _RWL_MAGIC: u16 = 0x5257; // RW - -pub const NCCS: usize = 19; - -pub const LOG_CRON: ::c_int = 15 << 3; - -pub const PTHREAD_MUTEX_INITIALIZER: pthread_mutex_t = pthread_mutex_t { - 
__pthread_mutex_flag1: 0, - __pthread_mutex_flag2: 0, - __pthread_mutex_ceiling: 0, - __pthread_mutex_type: PTHREAD_PROCESS_PRIVATE, - __pthread_mutex_magic: _MUTEX_MAGIC, - __pthread_mutex_lock: 0, - __pthread_mutex_data: 0 -}; -pub const PTHREAD_COND_INITIALIZER: pthread_cond_t = pthread_cond_t { - __pthread_cond_flag: [0; 4], - __pthread_cond_type: PTHREAD_PROCESS_PRIVATE, - __pthread_cond_magic: _COND_MAGIC, - __pthread_cond_data: 0 -}; -pub const PTHREAD_RWLOCK_INITIALIZER: pthread_rwlock_t = pthread_rwlock_t { - __pthread_rwlock_readers: 0, - __pthread_rwlock_type: PTHREAD_PROCESS_PRIVATE, - __pthread_rwlock_magic: _RWL_MAGIC, - __pthread_rwlock_mutex: PTHREAD_MUTEX_INITIALIZER, - __pthread_rwlock_readercv: PTHREAD_COND_INITIALIZER, - __pthread_rwlock_writercv: PTHREAD_COND_INITIALIZER -}; -pub const PTHREAD_MUTEX_NORMAL: ::c_int = 0; -pub const PTHREAD_MUTEX_ERRORCHECK: ::c_int = 2; -pub const PTHREAD_MUTEX_RECURSIVE: ::c_int = 4; -pub const PTHREAD_MUTEX_DEFAULT: ::c_int = PTHREAD_MUTEX_NORMAL; - -pub const RTLD_NEXT: *mut ::c_void = -1isize as *mut ::c_void; -pub const RTLD_DEFAULT: *mut ::c_void = -2isize as *mut ::c_void; -pub const RTLD_SELF: *mut ::c_void = -3isize as *mut ::c_void; -pub const RTLD_PROBE: *mut ::c_void = -4isize as *mut ::c_void; - -pub const RTLD_NOW: ::c_int = 0x2; -pub const RTLD_NOLOAD: ::c_int = 0x4; -pub const RTLD_GLOBAL: ::c_int = 0x100; -pub const RTLD_LOCAL: ::c_int = 0x0; -pub const RTLD_PARENT: ::c_int = 0x200; -pub const RTLD_GROUP: ::c_int = 0x400; -pub const RTLD_WORLD: ::c_int = 0x800; -pub const RTLD_NODELETE: ::c_int = 0x1000; -pub const RTLD_FIRST: ::c_int = 0x2000; -pub const RTLD_CONFGEN: ::c_int = 0x10000; - -f! 
{ - pub fn FD_CLR(fd: ::c_int, set: *mut fd_set) -> () { - let fd = fd as usize; - (*set).fds_bits[fd / 32] &= !(1 << (fd % 32)); - return - } - - pub fn FD_ISSET(fd: ::c_int, set: *mut fd_set) -> bool { - let fd = fd as usize; - return ((*set).fds_bits[fd / 32] & (1 << (fd % 32))) != 0 - } - - pub fn FD_SET(fd: ::c_int, set: *mut fd_set) -> () { - let fd = fd as usize; - (*set).fds_bits[fd / 32] |= 1 << (fd % 32); - return - } - - pub fn FD_ZERO(set: *mut fd_set) -> () { - for slot in (*set).fds_bits.iter_mut() { - *slot = 0; - } - } - - pub fn WIFEXITED(status: ::c_int) -> bool { - (status & 0xFF) == 0 - } - - pub fn WEXITSTATUS(status: ::c_int) -> ::c_int { - (status >> 8) & 0xFF - } - - pub fn WTERMSIG(status: ::c_int) -> ::c_int { - status & 0x7F - } -} - -extern { - pub fn getifaddrs(ifap: *mut *mut ::ifaddrs) -> ::c_int; - pub fn freeifaddrs(ifa: *mut ::ifaddrs); - - pub fn stack_getbounds(sp: *mut ::stack_t) -> ::c_int; - pub fn mincore(addr: *const ::c_void, len: ::size_t, - vec: *mut c_char) -> ::c_int; - pub fn setgroups(ngroups: ::c_int, - ptr: *const ::gid_t) -> ::c_int; - pub fn ioctl(fildes: ::c_int, request: ::c_int, ...) 
-> ::c_int; - pub fn mprotect(addr: *const ::c_void, len: ::size_t, prot: ::c_int) - -> ::c_int; - pub fn clock_getres(clk_id: clockid_t, tp: *mut ::timespec) -> ::c_int; - pub fn clock_gettime(clk_id: clockid_t, tp: *mut ::timespec) -> ::c_int; - pub fn clock_nanosleep(clk_id: clockid_t, - flags: ::c_int, - rqtp: *const ::timespec, - rmtp: *mut ::timespec) -> ::c_int; - pub fn getnameinfo(sa: *const ::sockaddr, - salen: ::socklen_t, - host: *mut ::c_char, - hostlen: ::socklen_t, - serv: *mut ::c_char, - sevlen: ::socklen_t, - flags: ::c_int) -> ::c_int; - pub fn getpwnam_r(name: *const ::c_char, - pwd: *mut passwd, - buf: *mut ::c_char, - buflen: ::c_int) -> *const passwd; - pub fn getpwuid_r(uid: ::uid_t, - pwd: *mut passwd, - buf: *mut ::c_char, - buflen: ::c_int) -> *const passwd; - pub fn setpwent(); - pub fn getpwent() -> *mut passwd; - pub fn readdir(dirp: *mut ::DIR) -> *const ::dirent; - pub fn fdatasync(fd: ::c_int) -> ::c_int; - pub fn nl_langinfo_l(item: ::nl_item, locale: ::locale_t) -> *mut ::c_char; - pub fn duplocale(base: ::locale_t) -> ::locale_t; - pub fn freelocale(loc: ::locale_t); - pub fn newlocale(mask: ::c_int, - locale: *const ::c_char, - base: ::locale_t) -> ::locale_t; - pub fn uselocale(loc: ::locale_t) -> ::locale_t; - pub fn getprogname() -> *const ::c_char; - pub fn setprogname(name: *const ::c_char); - pub fn getloadavg(loadavg: *mut ::c_double, nelem: ::c_int) -> ::c_int; - pub fn getpriority(which: ::c_int, who: ::c_int) -> ::c_int; - pub fn setpriority(which: ::c_int, who: ::c_int, prio: ::c_int) -> ::c_int; - - pub fn openat(dirfd: ::c_int, pathname: *const ::c_char, - flags: ::c_int, ...) 
-> ::c_int; - pub fn faccessat(dirfd: ::c_int, pathname: *const ::c_char, - mode: ::c_int, flags: ::c_int) -> ::c_int; - pub fn fchmodat(dirfd: ::c_int, pathname: *const ::c_char, - mode: ::mode_t, flags: ::c_int) -> ::c_int; - pub fn fchownat(dirfd: ::c_int, pathname: *const ::c_char, - owner: ::uid_t, group: ::gid_t, - flags: ::c_int) -> ::c_int; - pub fn fstatat(dirfd: ::c_int, pathname: *const ::c_char, - buf: *mut stat, flags: ::c_int) -> ::c_int; - pub fn linkat(olddirfd: ::c_int, oldpath: *const ::c_char, - newdirfd: ::c_int, newpath: *const ::c_char, - flags: ::c_int) -> ::c_int; - pub fn mkdirat(dirfd: ::c_int, pathname: *const ::c_char, - mode: ::mode_t) -> ::c_int; - pub fn mknodat(dirfd: ::c_int, pathname: *const ::c_char, - mode: ::mode_t, dev: dev_t) -> ::c_int; - pub fn readlinkat(dirfd: ::c_int, pathname: *const ::c_char, - buf: *mut ::c_char, bufsiz: ::size_t) -> ::ssize_t; - pub fn renameat(olddirfd: ::c_int, oldpath: *const ::c_char, - newdirfd: ::c_int, newpath: *const ::c_char) - -> ::c_int; - pub fn symlinkat(target: *const ::c_char, newdirfd: ::c_int, - linkpath: *const ::c_char) -> ::c_int; - pub fn unlinkat(dirfd: ::c_int, pathname: *const ::c_char, - flags: ::c_int) -> ::c_int; - pub fn mkfifoat(dirfd: ::c_int, pathname: *const ::c_char, - mode: ::mode_t) -> ::c_int; - pub fn sethostname(name: *const ::c_char, len: ::size_t) -> ::c_int; - pub fn if_nameindex() -> *mut if_nameindex; - pub fn if_freenameindex(ptr: *mut if_nameindex); - pub fn pthread_condattr_getclock(attr: *const pthread_condattr_t, - clock_id: *mut clockid_t) -> ::c_int; - pub fn pthread_condattr_setclock(attr: *mut pthread_condattr_t, - clock_id: clockid_t) -> ::c_int; - pub fn sem_timedwait(sem: *mut sem_t, - abstime: *const ::timespec) -> ::c_int; - pub fn pthread_mutex_timedlock(lock: *mut pthread_mutex_t, - abstime: *const ::timespec) -> ::c_int; -} diff -Nru cargo-0.17.0/vendor/libc-0.2.18/src/windows.rs cargo-0.19.0/vendor/libc-0.2.18/src/windows.rs --- 
cargo-0.17.0/vendor/libc-0.2.18/src/windows.rs 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/libc-0.2.18/src/windows.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,235 +0,0 @@ -//! Windows CRT definitions - -pub type c_char = i8; -pub type c_long = i32; -pub type c_ulong = u32; -pub type wchar_t = u16; - -pub type clock_t = i32; - -cfg_if! { - if #[cfg(all(target_arch = "x86", target_env = "gnu"))] { - pub type time_t = i32; - } else { - pub type time_t = i64; - } -} - -pub type off_t = i32; -pub type dev_t = u32; -pub type ino_t = u16; -pub enum timezone {} -pub type time64_t = i64; - -s! { - // note this is the struct called stat64 in Windows. Not stat, nor stati64. - pub struct stat { - pub st_dev: dev_t, - pub st_ino: ino_t, - pub st_mode: u16, - pub st_nlink: ::c_short, - pub st_uid: ::c_short, - pub st_gid: ::c_short, - pub st_rdev: dev_t, - pub st_size: i64, - pub st_atime: time64_t, - pub st_mtime: time64_t, - pub st_ctime: time64_t, - } - - // note that this is called utimbuf64 in Windows - pub struct utimbuf { - pub actime: time64_t, - pub modtime: time64_t, - } - - pub struct timeval { - pub tv_sec: c_long, - pub tv_usec: c_long, - } - - pub struct timespec { - pub tv_sec: time_t, - pub tv_nsec: c_long, - } -} - -pub const EXIT_FAILURE: ::c_int = 1; -pub const EXIT_SUCCESS: ::c_int = 0; -pub const RAND_MAX: ::c_int = 32767; -pub const EOF: ::c_int = -1; -pub const SEEK_SET: ::c_int = 0; -pub const SEEK_CUR: ::c_int = 1; -pub const SEEK_END: ::c_int = 2; -pub const _IOFBF: ::c_int = 0; -pub const _IONBF: ::c_int = 4; -pub const _IOLBF: ::c_int = 64; -pub const BUFSIZ: ::c_uint = 512; -pub const FOPEN_MAX: ::c_uint = 20; -pub const FILENAME_MAX: ::c_uint = 260; - -cfg_if! 
{ - if #[cfg(all(target_env = "gnu"))] { - pub const L_tmpnam: ::c_uint = 14; - pub const TMP_MAX: ::c_uint = 0x7fff; - } else if #[cfg(all(target_env = "msvc"))] { - pub const L_tmpnam: ::c_uint = 260; - pub const TMP_MAX: ::c_uint = 0x7fff_ffff; - } else { - // Unknown target_env - } -} - -pub const O_RDONLY: ::c_int = 0; -pub const O_WRONLY: ::c_int = 1; -pub const O_RDWR: ::c_int = 2; -pub const O_APPEND: ::c_int = 8; -pub const O_CREAT: ::c_int = 256; -pub const O_EXCL: ::c_int = 1024; -pub const O_TEXT: ::c_int = 16384; -pub const O_BINARY: ::c_int = 32768; -pub const O_NOINHERIT: ::c_int = 128; -pub const O_TRUNC: ::c_int = 512; -pub const S_IFCHR: ::c_int = 8192; -pub const S_IFDIR: ::c_int = 16384; -pub const S_IFREG: ::c_int = 32768; -pub const S_IFMT: ::c_int = 61440; -pub const S_IEXEC: ::c_int = 64; -pub const S_IWRITE: ::c_int = 128; -pub const S_IREAD: ::c_int = 256; - -pub const LC_ALL: ::c_int = 0; -pub const LC_COLLATE: ::c_int = 1; -pub const LC_CTYPE: ::c_int = 2; -pub const LC_MONETARY: ::c_int = 3; -pub const LC_NUMERIC: ::c_int = 4; -pub const LC_TIME: ::c_int = 5; - -pub const EPERM: ::c_int = 1; -pub const ENOENT: ::c_int = 2; -pub const ESRCH: ::c_int = 3; -pub const EINTR: ::c_int = 4; -pub const EIO: ::c_int = 5; -pub const ENXIO: ::c_int = 6; -pub const E2BIG: ::c_int = 7; -pub const ENOEXEC: ::c_int = 8; -pub const EBADF: ::c_int = 9; -pub const ECHILD: ::c_int = 10; -pub const EAGAIN: ::c_int = 11; -pub const ENOMEM: ::c_int = 12; -pub const EACCES: ::c_int = 13; -pub const EFAULT: ::c_int = 14; -pub const EBUSY: ::c_int = 16; -pub const EEXIST: ::c_int = 17; -pub const EXDEV: ::c_int = 18; -pub const ENODEV: ::c_int = 19; -pub const ENOTDIR: ::c_int = 20; -pub const EISDIR: ::c_int = 21; -pub const EINVAL: ::c_int = 22; -pub const ENFILE: ::c_int = 23; -pub const EMFILE: ::c_int = 24; -pub const ENOTTY: ::c_int = 25; -pub const EFBIG: ::c_int = 27; -pub const ENOSPC: ::c_int = 28; -pub const ESPIPE: ::c_int = 29; -pub const EROFS: 
::c_int = 30; -pub const EMLINK: ::c_int = 31; -pub const EPIPE: ::c_int = 32; -pub const EDOM: ::c_int = 33; -pub const ERANGE: ::c_int = 34; -pub const EDEADLK: ::c_int = 36; -pub const EDEADLOCK: ::c_int = 36; -pub const ENAMETOOLONG: ::c_int = 38; -pub const ENOLCK: ::c_int = 39; -pub const ENOSYS: ::c_int = 40; -pub const ENOTEMPTY: ::c_int = 41; -pub const EILSEQ: ::c_int = 42; -pub const STRUNCATE: ::c_int = 80; - -#[cfg(all(target_env = "msvc", stdbuild))] // " if " -- appease style checker -#[link(name = "msvcrt", cfg(not(target_feature = "crt-static")))] -#[link(name = "libcmt", cfg(target_feature = "crt-static"))] -extern {} - -extern { - #[link_name = "_chmod"] - pub fn chmod(path: *const c_char, mode: ::c_int) -> ::c_int; - #[link_name = "_wchmod"] - pub fn wchmod(path: *const wchar_t, mode: ::c_int) -> ::c_int; - #[link_name = "_mkdir"] - pub fn mkdir(path: *const c_char) -> ::c_int; - #[link_name = "_wrmdir"] - pub fn wrmdir(path: *const wchar_t) -> ::c_int; - #[link_name = "_fstat64"] - pub fn fstat(fildes: ::c_int, buf: *mut stat) -> ::c_int; - #[link_name = "_stat64"] - pub fn stat(path: *const c_char, buf: *mut stat) -> ::c_int; - #[link_name = "_wstat64"] - pub fn wstat(path: *const wchar_t, buf: *mut stat) -> ::c_int; - #[link_name = "_wutime64"] - pub fn wutime(file: *const wchar_t, buf: *mut utimbuf) -> ::c_int; - #[link_name = "_popen"] - pub fn popen(command: *const c_char, mode: *const c_char) -> *mut ::FILE; - #[link_name = "_pclose"] - pub fn pclose(stream: *mut ::FILE) -> ::c_int; - #[link_name = "_fdopen"] - pub fn fdopen(fd: ::c_int, mode: *const c_char) -> *mut ::FILE; - #[link_name = "_fileno"] - pub fn fileno(stream: *mut ::FILE) -> ::c_int; - #[link_name = "_open"] - pub fn open(path: *const c_char, oflag: ::c_int, ...) -> ::c_int; - #[link_name = "_wopen"] - pub fn wopen(path: *const wchar_t, oflag: ::c_int, ...) 
-> ::c_int; - #[link_name = "_creat"] - pub fn creat(path: *const c_char, mode: ::c_int) -> ::c_int; - #[link_name = "_access"] - pub fn access(path: *const c_char, amode: ::c_int) -> ::c_int; - #[link_name = "_chdir"] - pub fn chdir(dir: *const c_char) -> ::c_int; - #[link_name = "_close"] - pub fn close(fd: ::c_int) -> ::c_int; - #[link_name = "_dup"] - pub fn dup(fd: ::c_int) -> ::c_int; - #[link_name = "_dup2"] - pub fn dup2(src: ::c_int, dst: ::c_int) -> ::c_int; - #[link_name = "_execv"] - pub fn execv(prog: *const c_char, argv: *const *const c_char) -> ::intptr_t; - #[link_name = "_execve"] - pub fn execve(prog: *const c_char, argv: *const *const c_char, - envp: *const *const c_char) -> ::c_int; - #[link_name = "_execvp"] - pub fn execvp(c: *const c_char, argv: *const *const c_char) -> ::c_int; - #[link_name = "_execvpe"] - pub fn execvpe(c: *const c_char, argv: *const *const c_char, - envp: *const *const c_char) -> ::c_int; - #[link_name = "_getcwd"] - pub fn getcwd(buf: *mut c_char, size: ::c_int) -> *mut c_char; - #[link_name = "_getpid"] - pub fn getpid() -> ::c_int; - #[link_name = "_isatty"] - pub fn isatty(fd: ::c_int) -> ::c_int; - #[link_name = "_lseek"] - pub fn lseek(fd: ::c_int, offset: c_long, origin: ::c_int) -> c_long; - #[link_name = "_pipe"] - pub fn pipe(fds: *mut ::c_int, - psize: ::c_uint, - textmode: ::c_int) -> ::c_int; - #[link_name = "_read"] - pub fn read(fd: ::c_int, buf: *mut ::c_void, count: ::c_uint) -> ::c_int; - #[link_name = "_rmdir"] - pub fn rmdir(path: *const c_char) -> ::c_int; - #[link_name = "_unlink"] - pub fn unlink(c: *const c_char) -> ::c_int; - #[link_name = "_write"] - pub fn write(fd: ::c_int, buf: *const ::c_void, count: ::c_uint) -> ::c_int; - #[link_name = "_commit"] - pub fn commit(fd: ::c_int) -> ::c_int; - #[link_name = "_get_osfhandle"] - pub fn get_osfhandle(fd: ::c_int) -> ::intptr_t; - #[link_name = "_open_osfhandle"] - pub fn open_osfhandle(osfhandle: ::intptr_t, flags: ::c_int) -> ::c_int; - pub fn 
setlocale(category: ::c_int, locale: *const c_char) -> *mut c_char; - #[link_name = "_wsetlocale"] - pub fn wsetlocale(category: ::c_int, - locale: *const wchar_t) -> *mut wchar_t; -} diff -Nru cargo-0.17.0/vendor/libc-0.2.18/.travis.yml cargo-0.19.0/vendor/libc-0.2.18/.travis.yml --- cargo-0.17.0/vendor/libc-0.2.18/.travis.yml 2017-03-24 16:59:43.000000000 +0000 +++ cargo-0.19.0/vendor/libc-0.2.18/.travis.yml 1970-01-01 00:00:00.000000000 +0000 @@ -1,126 +0,0 @@ -language: rust -sudo: required -dist: trusty -services: - - docker -install: - - curl https://static.rust-lang.org/rustup.sh | - sh -s -- --add-target=$TARGET --disable-sudo -y --prefix=`rustc --print sysroot` -script: - - cargo build - - cargo build --no-default-features - - cargo generate-lockfile --manifest-path libc-test/Cargo.toml - - if [[ $TRAVIS_OS_NAME = "linux" ]]; then - sh ci/run-docker.sh $TARGET; - else - export CARGO_TARGET_DIR=`pwd`/target; - sh ci/run.sh $TARGET; - fi - - rustc ci/style.rs && ./style src -osx_image: xcode7.3 -env: - global: - secure: "e2/3QjgRN9atOuSHp22TrYG7QVKcYUWY48Hi9b60w+r1+BhPkTseIJLte7WefRhdXtqpjjUJTooKDhnurFOeHaCT+nmBgiv+FPU893sBl4bhesY4m0vgUJVbNZcs6lTImYekWVb+aqjGdgV/XAgCw7c3kPmrZV0MzGDWL64Xaps=" -matrix: - include: - # 1.0.0 compat - - os: linux - env: TARGET=x86_64-unknown-linux-gnu - rust: 1.0.0 - script: cargo build - install: - - # build documentation - - os: linux - env: TARGET=x86_64-unknown-linux-gnu - rust: nightly - script: sh ci/dox.sh - - # stable compat - - os: linux - env: TARGET=x86_64-unknown-linux-gnu - rust: stable - - os: linux - env: TARGET=i686-unknown-linux-gnu - rust: stable - - os: osx - env: TARGET=x86_64-apple-darwin - rust: stable - - os: osx - env: TARGET=i686-apple-darwin - rust: stable - - os: linux - env: TARGET=arm-linux-androideabi - rust: stable - - os: linux - env: TARGET=x86_64-unknown-linux-musl - rust: stable - - os: linux - env: TARGET=i686-unknown-linux-musl - rust: stable - - os: linux - env: 
TARGET=arm-unknown-linux-gnueabihf - rust: stable - - os: linux - env: TARGET=aarch64-unknown-linux-gnu - rust: stable - - os: osx - env: TARGET=i386-apple-ios - rust: stable - - os: osx - env: TARGET=x86_64-apple-ios - rust: stable - - os: linux - env: TARGET=x86_64-rumprun-netbsd - rust: stable - - os: linux - env: TARGET=powerpc-unknown-linux-gnu - rust: stable - - os: linux - env: TARGET=powerpc64-unknown-linux-gnu - rust: beta - - os: linux - env: TARGET=mips-unknown-linux-musl - rust: stable - - os: linux - env: TARGET=mipsel-unknown-linux-musl - rust: stable - - os: linux - env: TARGET=mips64-unknown-linux-gnuabi64 - rust: beta - - os: linux - env: TARGET=mips-unknown-linux-gnu - rust: beta - - # beta - - os: linux - env: TARGET=x86_64-unknown-linux-gnu - rust: beta - - os: osx - env: TARGET=x86_64-apple-darwin - rust: beta - - # nightly - - os: linux - env: TARGET=x86_64-unknown-linux-gnu - rust: nightly - - os: osx - env: TARGET=x86_64-apple-darwin - rust: nightly - - # QEMU based targets that compile in an emulator - - os: linux - env: TARGET=x86_64-unknown-freebsd - rust: stable - - os: linux - env: TARGET=x86_64-unknown-openbsd QEMU=openbsd.qcow2 - rust: stable - script: sh ci/run-docker.sh $TARGET - install: - -cache: cargo - -notifications: - email: - on_success: never - webhooks: https://buildbot.rust-lang.org/homu/travis diff -Nru cargo-0.17.0/vendor/libc-0.2.21/appveyor.yml cargo-0.19.0/vendor/libc-0.2.21/appveyor.yml --- cargo-0.17.0/vendor/libc-0.2.21/appveyor.yml 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/libc-0.2.21/appveyor.yml 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,25 @@ +environment: + matrix: + - TARGET: x86_64-pc-windows-gnu + MSYS2_BITS: 64 + - TARGET: i686-pc-windows-gnu + MSYS2_BITS: 32 + - TARGET: x86_64-pc-windows-msvc + - TARGET: i686-pc-windows-msvc +install: + - curl -sSf -o rustup-init.exe https://win.rustup.rs/ + - rustup-init.exe -y --default-host %TARGET% + - set PATH=%PATH%;C:\Users\appveyor\.cargo\bin 
+ - if defined MSYS2_BITS set PATH=%PATH%;C:\msys64\mingw%MSYS2_BITS%\bin + - rustc -V + - cargo -V + +build: false + +test_script: + - cargo test --target %TARGET% + - cargo run --manifest-path libc-test/Cargo.toml --target %TARGET% + +cache: + - target + - C:\Users\appveyor\.cargo\registry diff -Nru cargo-0.17.0/vendor/libc-0.2.21/.cargo-checksum.json cargo-0.19.0/vendor/libc-0.2.21/.cargo-checksum.json --- cargo-0.17.0/vendor/libc-0.2.21/.cargo-checksum.json 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/libc-0.2.21/.cargo-checksum.json 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1 @@ +{"files":{".cargo-ok":"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",".gitignore":"7150ee9391a955b2ef7e0762fc61c0c1aab167620ca36d88d78062d93b8334ba",".travis.yml":"261a5d76519569e16628b20016a9c66e9daa8ee12c6dbb882446987f5c5453b6","Cargo.toml":"ccf723ba987f4387767a21f47678713758d06422722aace4a8d923aced3ab366","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"6485b8ed310d3f0340bf1ad1f47645069ce4069dcc6bb46c7d5c6faf41de1fdb","README.md":"a2ffaf2a812cec7c243fd753891c7e2eada5b61d9846dd5318d7c99d4514c988","appveyor.yml":"c0d70c650b6231e6ff78a352224f1a522a9be69d9da4251adbaddb3f0393294d","ci/README.md":"be804f15e2128e5fd4b160cb0b13cff5f19e7d77b55ec5254aa6fd8731c84f0d","ci/android-accept-licenses.sh":"84ad00815f628005ed22c5d6cd14990ebc97812a7163bd275b2877904eddab53","ci/android-install-ndk.sh":"73c5f21438c024ce1b8c793184447ff9aecd83c87cbde8de580b782842b3563f","ci/android-install-sdk.sh":"44bcdacd4a38f4f7597937dc4120a0dd61df0f94cd51ec0dfa4dd8f5621b1afa","ci/docker/aarch64-linux-android/Dockerfile":"68f375c6e0b3716f8c0b6ed217bc6c3631fa2b86c578b983a0bf60d0344efd74","ci/docker/aarch64-unknown-linux-gnu/Dockerfile":"62ca7317439f9c303990e897450a91cd467be05eb75dfc01456d417932ac8672","ci/docker/arm-linux-androideabi/Dockerfile":"1193bf048efbeb9be3c9fac0836dcf6ae07e12cdf09b36b386dd4cbd62abbffa","ci/docker/arm-unknown-
linux-gnueabihf/Dockerfile":"e349f7caa463adbde8d6ec4d2b9f7720ed81c77f48d75bbfb78c89751f55c2dc","ci/docker/i686-linux-android/Dockerfile":"7c353aecdf0b21d8584392cc6ea31d455adad769034c3ea09191a1b26a2521e1","ci/docker/i686-unknown-linux-gnu/Dockerfile":"07e9df6ba91025cbec7ae81ade63f8cfb8a54c5e1e5a8f8def0617e17bd59db0","ci/docker/i686-unknown-linux-musl/Dockerfile":"fcaedc90fbb90375186b36b4324bff0a042aae70695be0f2b632e3cf7479eae6","ci/docker/mips-unknown-linux-gnu/Dockerfile":"860299d96ee50ebdbd788e65eb6ba1f561ef66107647bddffcb2567ac350896b","ci/docker/mips-unknown-linux-musl/Dockerfile":"711c43122fa34cee83a69944493213924b0ff1fccd78c7a141cb2b2127526484","ci/docker/mips64-unknown-linux-gnuabi64/Dockerfile":"163776e0fd38f66df7415421202ac29efc7d345a628947434e573c3885594ab5","ci/docker/mipsel-unknown-linux-musl/Dockerfile":"aef213df08da03ab6a723c3e6e5594a0586251950d81482cf53179d8e64e95c7","ci/docker/powerpc-unknown-linux-gnu/Dockerfile":"08b846a338c2ee70100f4e80db812668dc58bfb536c44a95cd1cf004d965186b","ci/docker/powerpc64-unknown-linux-gnu/Dockerfile":"4da285ffd035d16f5da9e3701841eb86049c8cfa417fa81e53da4ef74152eac0","ci/docker/x86_64-rumprun-netbsd/Dockerfile":"44c3107fb30380785aaed6ff73fa334017a5bb4e3b5c7d4876154f09023a2b99","ci/docker/x86_64-unknown-freebsd/Dockerfile":"ef0f9f63065218728d2daafaa5ba71b17e4ccc23d72e859e0a7133fc64c0815e","ci/docker/x86_64-unknown-linux-gnu/Dockerfile":"67fabbc8c6ac02376cf9344251ad49ecdac396b71accb572fd1ae65225325bc0","ci/docker/x86_64-unknown-linux-musl/Dockerfile":"f71019fed5204b950843ef5e56144161fda7e27fad68ed0e8bc4353c388c7bcf","ci/docker/x86_64-unknown-openbsd/Dockerfile":"dfa5c23a6cff8c7a9a846668118c71a8406a360801fd3632fb12e8fbda6b7338","ci/dox.sh":"9ea240a4a607036235fd68c01b5d2a59f365768d103d3be774dcf34aa3ff563e","ci/ios/deploy_and_run_on_ios_simulator.rs":"3175066fd7f82390f6226d881e1a1dda9767ea2705656870e0d7774e2731800e","ci/landing-page-footer.html":"b70b3112c2147f5c967e7481061ef38bc2d79a28dd55a16fb916d9c9426da2c4","ci/landing-page
-head.html":"ad69663fac7924f27d0209bc519d55838e86edfc4133713a6fd08caadac1b142","ci/run-docker.sh":"ce112501d0299bb77e0150bda6c2881bc9b6b6eb2ccc81ca43c44e2cc689582d","ci/run-qemu.sh":"bb859421170871ef23a8940c5e150efec0c01b95e32d2ce2d37b79a45d9d346c","ci/run.sh":"077e634d44bef2584e769c27511987a96fe07f11407de30da4d3776a2922fcfe","ci/style.rs":"60564abc1d5197ed1598426dd0d6ee9939a16d2875b03373538f58843bb616c4","src/dox.rs":"f732d3c0dcd6ace854ee32d8f898b96ac42204a799c6e386c4ba88f6e58673dc","src/lib.rs":"651696755aed40230cde8a505a2dfef61bea52d6349c9b9343497a5c7158ec92","src/macros.rs":"bd9802772b0e5c8b3c550d1c24307f06c0d1e4ce656b4ae1cf092142bbe5412c","src/redox.rs":"3dd158ba9fbbabe96ce2607e91dbf07b93b37c0427734118702dcb4901fe8964","src/unix/bsd/apple/b32.rs":"110ecff78da0e8d405d861447904da403d8b3f6da1f0f9dc9987633f3f04fe46","src/unix/bsd/apple/b64.rs":"e6808081c0b276cca3189628716f507c7c0d00b62417cd44addbdaefe848cec7","src/unix/bsd/apple/mod.rs":"2f6869a5649e2eecdea3992bc6759b7c0a2f726a8e76f2fdf8f5fbe583c91d8b","src/unix/bsd/freebsdlike/dragonfly/mod.rs":"54b3b30c4cac35ced9197e7267f209b0f168de8a6899ab0cba290c5ae399f0e7","src/unix/bsd/freebsdlike/freebsd/aarch64.rs":"c7f46b9ae23fde5a9e245a28ed1380066e67f081323b4d253a18e9da3b97b860","src/unix/bsd/freebsdlike/freebsd/mod.rs":"593a10bae7761dc3c77cbeb10d34d122c2e37e1635f4fef65b199f58a77f521e","src/unix/bsd/freebsdlike/freebsd/x86.rs":"54311d3ebf2bb091ab22361e377e6ef9224aec2ecfe459fbfcedde4932db9c58","src/unix/bsd/freebsdlike/freebsd/x86_64.rs":"c7f46b9ae23fde5a9e245a28ed1380066e67f081323b4d253a18e9da3b97b860","src/unix/bsd/freebsdlike/mod.rs":"33683d0e9b21420c7f119e485fdd4888db7d6f28d9aa5903c2271e585209e788","src/unix/bsd/mod.rs":"6c8511618608536d5026222c8e8f1c93af576a4bad7452bc1cd954d72c47615b","src/unix/bsd/netbsdlike/mod.rs":"a73e1c3a0c5956644ec012db5a069b4d0170c1e0733c02c6c720e4265cab731c","src/unix/bsd/netbsdlike/netbsd/mod.rs":"62195940cf0ee3cf2e36fed28e0b57cc7dc701fb029ae427e203b0b2a055152b","src/unix/bsd/netbsdlike/netbs
d/other/b32/mod.rs":"bd251a102bed65d5cb3459275f6ec3310fe5803ff4c9651212115548f86256d0","src/unix/bsd/netbsdlike/netbsd/other/b64/mod.rs":"927eeccaf3269d299db4c2a55f8010807bf43dfa894aea6a783215f5d3560baa","src/unix/bsd/netbsdlike/netbsd/other/mod.rs":"4d9f7091af8e166943ac6f42ce85558909e5b6e61325039bff7adfbcf4b90212","src/unix/bsd/netbsdlike/openbsdlike/bitrig.rs":"f8cd05dacd3a3136c58da5a2fbe26f703767823b28e74fe8a2b57a7bd98d6d5c","src/unix/bsd/netbsdlike/openbsdlike/mod.rs":"00fe0616a9191f344450c2dc1906becd8c54b73ceb2d50826d52e8e18beed8ca","src/unix/bsd/netbsdlike/openbsdlike/openbsd.rs":"b1b9cf7be9f0e4d294a57092594074ad03a65fe0eeac9d1104fa874c313e7900","src/unix/bsd/netbsdlike/openbsdlike/other/b32/mod.rs":"bd251a102bed65d5cb3459275f6ec3310fe5803ff4c9651212115548f86256d0","src/unix/bsd/netbsdlike/openbsdlike/other/b64/mod.rs":"927eeccaf3269d299db4c2a55f8010807bf43dfa894aea6a783215f5d3560baa","src/unix/bsd/netbsdlike/openbsdlike/other/mod.rs":"f5d8db6f54efd05520b31b764a6bacbf612e1aebce097d2d5bfaaef3b91f37b5","src/unix/haiku/b32.rs":"bd251a102bed65d5cb3459275f6ec3310fe5803ff4c9651212115548f86256d0","src/unix/haiku/b64.rs":"b422430c550c0ba833c9206d1350861e344e3a2eb33d7d58693efb35044be1cc","src/unix/haiku/mod.rs":"ceb889e625bc5df542749a12f4cf593d3bf7e564682d0708ec42af16ed91ff3f","src/unix/mod.rs":"eecf3c2bf19086af9bf910e931433e279412645d80b4a6fa1cc097973ad85a18","src/unix/notbsd/android/b32/arm.rs":"d63a234f3a2399bdef1fa13c6a209386cb5ddac04c90f2c6d42badc8235dc49d","src/unix/notbsd/android/b32/mod.rs":"01afdb30feaa5376eecf4e1e045fc01915a369740c191f6b29589f41b8288792","src/unix/notbsd/android/b32/x86.rs":"10e6879dcbf136f0e907337987a0609b357e27e0f24ccb907525fcad881c24c3","src/unix/notbsd/android/b64/mod.rs":"dbfed07be2a8368158d4bc9766147c36f4b8befd44d783669f1df53bb516dbe8","src/unix/notbsd/android/mod.rs":"d38cd4bcac6eb282adee4cf61c4abbbf4bda2f29db5428e795f0741338c792a6","src/unix/notbsd/linux/mips/mips32.rs":"b268f603f71d854614c20cea00431812def9b683d43e6254ae62a8f88a14f7c3
","src/unix/notbsd/linux/mips/mips64.rs":"8bce84a47a6ad7fc95234fdd3513ddb8c78634f4ac18209c0276ab705c092ebe","src/unix/notbsd/linux/mips/mod.rs":"6d4a6a25d8f7ff690f30697bc832c556f4d95eae1f89870ac74a5e33804bceea","src/unix/notbsd/linux/mod.rs":"98b9e87bf4d4396c2195d19f95fa1f6129a92f2aa3783d328438cd35e90d6758","src/unix/notbsd/linux/musl/b32/arm.rs":"d43ba5c528926261b1ccd529ab55636254360a084ab84a7ec22a4eb5afddb8f8","src/unix/notbsd/linux/musl/b32/asmjs.rs":"085e410f990312de76f74cb9bbf9fcc27d686e94334143b34511f565d1b8bb91","src/unix/notbsd/linux/musl/b32/mips.rs":"941c88bc413ba2b87eb6a6cfe03355ee148b865f39598456965b3d04b3adbbe6","src/unix/notbsd/linux/musl/b32/mod.rs":"fc7e055edbe6fa170c2bda6b3415066c1871b3e60ebb70a6329d008f77100f0b","src/unix/notbsd/linux/musl/b32/x86.rs":"ffa5781b52a7d2774a34b1e3e2034a498b919fd96eb85e88098dba2e674a3797","src/unix/notbsd/linux/musl/b64/aarch64.rs":"4009c7eaf703472daef2a70bdac910d9fc395a33689ef2e8cf1c4e692445d3f0","src/unix/notbsd/linux/musl/b64/mod.rs":"d9b03a59a54a568b07ffa4380103a926cbb45dbfd87745edef79cd971ef31283","src/unix/notbsd/linux/musl/b64/powerpc64.rs":"dc28f5b7284235d6cf5519053cac59a1c16dc39223b71cca0871e4880755f852","src/unix/notbsd/linux/musl/b64/x86_64.rs":"43291acc0dfc92c2fec8ba6ce77ee9ca3c20bcdccec18e149f95ba911cee704b","src/unix/notbsd/linux/musl/mod.rs":"edc9ba321a31b24a16bdd88dcc413238ff8c5eba91b56f68e0a7c3ee6d303282","src/unix/notbsd/linux/other/b32/arm.rs":"626e79726de74a94baf3dc2a958a52fd3a5d8f86e93aef558e18701dc67c1582","src/unix/notbsd/linux/other/b32/mod.rs":"6122be151c5d85b9df6a3aaefc5cafd0657b371cafa9cb31ed9b1a394976af45","src/unix/notbsd/linux/other/b32/powerpc.rs":"433b2a7c795e58f881d67ea19d50433e1290799372b5e20b2707ad4dc5f9807d","src/unix/notbsd/linux/other/b32/x86.rs":"0a7cae86b1c67b32fc2991d32402b866a649bd86665910b7c2e0857eebf83695","src/unix/notbsd/linux/other/b64/aarch64.rs":"5b1acbea79ad5244e22fc020f3f26aff9e0877f426c21877fc5bb61de9113014","src/unix/notbsd/linux/other/b64/mod.rs":"bee90e8d9217ee344d0
e99fd483766a1b28e8b1ded930d44a0400a5e5224bb6a","src/unix/notbsd/linux/other/b64/powerpc64.rs":"870423ef564c7e3d31968d22612514b52e274d3b4bd8cd6b07ffa71f92ca260f","src/unix/notbsd/linux/other/b64/sparc64.rs":"fbfb3b17da347723ddf15fb490e51acdddc9ca7e6b4db072370ba2619cc2ff97","src/unix/notbsd/linux/other/b64/x86_64.rs":"3f7f76717861885d7b8cfdc1d5e4c0a1dd6fb37e43c7d530575503f6fedd58c1","src/unix/notbsd/linux/other/mod.rs":"bc9c99bfd399a16c569902c22fd34083cefdc03633840eff1d9bdcd9f376ffc3","src/unix/notbsd/linux/s390x.rs":"0ed3108cca67cb731f334d6beecbb99fdfc16de475320007d354fe1c4571fbd8","src/unix/notbsd/mod.rs":"68186f6015bdb9442a08dde6c34cab7cad4d788b163b6e42cdff51385f27387e","src/unix/solaris/mod.rs":"d8f1b94002c0448edbbb3ea21f5ac20306bced3daf82424afc665efffe6f7808","src/windows.rs":"acccbd341e81206cb1dc66af41762c193ac0dd719d700b64f7e26c967ee7d670"},"package":"88ee81885f9f04bff991e306fea7c1c60a5f0f9e409e99f6b40e3311a3363135"} \ No newline at end of file diff -Nru cargo-0.17.0/vendor/libc-0.2.21/Cargo.toml cargo-0.19.0/vendor/libc-0.2.21/Cargo.toml --- cargo-0.17.0/vendor/libc-0.2.21/Cargo.toml 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/libc-0.2.21/Cargo.toml 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,21 @@ +[package] + +name = "libc" +version = "0.2.21" +authors = ["The Rust Project Developers"] +license = "MIT/Apache-2.0" +readme = "README.md" +repository = "https://github.com/rust-lang/libc" +homepage = "https://github.com/rust-lang/libc" +documentation = "http://doc.rust-lang.org/libc" +description = """ +A library for types and bindings to native C functions often found in libc or +other common platform libraries. 
+""" + +[features] +default = ["use_std"] +use_std = [] + +[workspace] +members = ["libc-test", "libc-test/generate-files"] diff -Nru cargo-0.17.0/vendor/libc-0.2.21/ci/android-accept-licenses.sh cargo-0.19.0/vendor/libc-0.2.21/ci/android-accept-licenses.sh --- cargo-0.17.0/vendor/libc-0.2.21/ci/android-accept-licenses.sh 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/libc-0.2.21/ci/android-accept-licenses.sh 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,15 @@ +#!/usr/bin/expect -f +# ignore-license + +set timeout 1800 +set cmd [lindex $argv 0] +set licenses [lindex $argv 1] + +spawn {*}$cmd +expect { + "Do you accept the license '*'*" { + exp_send "y\r" + exp_continue + } + eof +} diff -Nru cargo-0.17.0/vendor/libc-0.2.21/ci/android-install-ndk.sh cargo-0.19.0/vendor/libc-0.2.21/ci/android-install-ndk.sh --- cargo-0.17.0/vendor/libc-0.2.21/ci/android-install-ndk.sh 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/libc-0.2.21/ci/android-install-ndk.sh 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,36 @@ +#!/bin/sh +# Copyright 2016 The Rust Project Developers. See the COPYRIGHT +# file at the top-level directory of this distribution and at +# http://rust-lang.org/COPYRIGHT. +# +# Licensed under the Apache License, Version 2.0 or the MIT license +# , at your +# option. This file may not be copied, modified, or distributed +# except according to those terms. 
+ +set -ex + +curl -O https://dl.google.com/android/repository/android-ndk-r13b-linux-x86_64.zip +unzip -q android-ndk-r13b-linux-x86_64.zip + +case "$1" in + aarch64) + arch=arm64 + ;; + + i686) + arch=x86 + ;; + + *) + arch=$1 + ;; +esac; + +android-ndk-r13b/build/tools/make_standalone_toolchain.py \ + --install-dir /android/ndk-$1 \ + --arch $arch \ + --api 24 + +rm -rf ./android-ndk-r13b-linux-x86_64.zip ./android-ndk-r13b diff -Nru cargo-0.17.0/vendor/libc-0.2.21/ci/android-install-sdk.sh cargo-0.19.0/vendor/libc-0.2.21/ci/android-install-sdk.sh --- cargo-0.17.0/vendor/libc-0.2.21/ci/android-install-sdk.sh 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/libc-0.2.21/ci/android-install-sdk.sh 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,53 @@ +#!/bin/sh +# Copyright 2016 The Rust Project Developers. See the COPYRIGHT +# file at the top-level directory of this distribution and at +# http://rust-lang.org/COPYRIGHT. +# +# Licensed under the Apache License, Version 2.0 or the MIT license +# , at your +# option. This file may not be copied, modified, or distributed +# except according to those terms. + +set -ex + +# Prep the SDK and emulator +# +# Note that the update process requires that we accept a bunch of licenses, and +# we can't just pipe `yes` into it for some reason, so we take the same strategy +# located in https://github.com/appunite/docker by just wrapping it in a script +# which apparently magically accepts the licenses. 
+ +mkdir sdk +curl https://dl.google.com/android/repository/tools_r25.2.5-linux.zip -O +unzip -d sdk tools_r25.2.5-linux.zip + +filter="platform-tools,android-24" + +case "$1" in + arm | armv7) + abi=armeabi-v7a + ;; + + aarch64) + abi=arm64-v8a + ;; + + i686) + abi=x86 + ;; + + *) + echo "invalid arch: $1" + exit 1 + ;; +esac; + +filter="$filter,sys-img-$abi-android-24" + +./android-accept-licenses.sh "android - update sdk -a --no-ui --filter $filter" + +echo "no" | android create avd \ + --name $1 \ + --target android-24 \ + --abi $abi diff -Nru cargo-0.17.0/vendor/libc-0.2.21/ci/docker/aarch64-linux-android/Dockerfile cargo-0.19.0/vendor/libc-0.2.21/ci/docker/aarch64-linux-android/Dockerfile --- cargo-0.17.0/vendor/libc-0.2.21/ci/docker/aarch64-linux-android/Dockerfile 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/libc-0.2.21/ci/docker/aarch64-linux-android/Dockerfile 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,32 @@ +FROM ubuntu:16.04 + +RUN dpkg --add-architecture i386 && \ + apt-get update && \ + apt-get install -y --no-install-recommends \ + file \ + curl \ + ca-certificates \ + python \ + unzip \ + expect \ + openjdk-9-jre \ + libstdc++6:i386 \ + libpulse0 \ + gcc \ + libc6-dev + +WORKDIR /android/ +COPY android* /android/ + +ENV ANDROID_ARCH=aarch64 +ENV PATH=$PATH:/android/ndk-$ANDROID_ARCH/bin:/android/sdk/tools:/android/sdk/platform-tools + +RUN sh /android/android-install-ndk.sh $ANDROID_ARCH +RUN sh /android/android-install-sdk.sh $ANDROID_ARCH +RUN mv /root/.android /tmp +RUN chmod 777 -R /tmp/.android +RUN chmod 755 /android/sdk/tools/* /android/sdk/tools/qemu/linux-x86_64/* + +ENV PATH=$PATH:/rust/bin \ + CARGO_TARGET_AARCH64_LINUX_ANDROID_LINKER=aarch64-linux-android-gcc \ + HOME=/tmp diff -Nru cargo-0.17.0/vendor/libc-0.2.21/ci/docker/aarch64-unknown-linux-gnu/Dockerfile cargo-0.19.0/vendor/libc-0.2.21/ci/docker/aarch64-unknown-linux-gnu/Dockerfile --- cargo-0.17.0/vendor/libc-0.2.21/ci/docker/aarch64-unknown-linux-gnu/Dockerfile 
1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/libc-0.2.21/ci/docker/aarch64-unknown-linux-gnu/Dockerfile 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,7 @@ +FROM ubuntu:16.10 +RUN apt-get update +RUN apt-get install -y --no-install-recommends \ + gcc libc6-dev ca-certificates \ + gcc-aarch64-linux-gnu libc6-dev-arm64-cross qemu-user +ENV CARGO_TARGET_AARCH64_UNKNOWN_LINUX_GNU_LINKER=aarch64-linux-gnu-gcc \ + PATH=$PATH:/rust/bin diff -Nru cargo-0.17.0/vendor/libc-0.2.21/ci/docker/arm-linux-androideabi/Dockerfile cargo-0.19.0/vendor/libc-0.2.21/ci/docker/arm-linux-androideabi/Dockerfile --- cargo-0.17.0/vendor/libc-0.2.21/ci/docker/arm-linux-androideabi/Dockerfile 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/libc-0.2.21/ci/docker/arm-linux-androideabi/Dockerfile 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,32 @@ +FROM ubuntu:16.04 + +RUN dpkg --add-architecture i386 && \ + apt-get update && \ + apt-get install -y --no-install-recommends \ + file \ + curl \ + ca-certificates \ + python \ + unzip \ + expect \ + openjdk-9-jre \ + libstdc++6:i386 \ + libpulse0 \ + gcc \ + libc6-dev + +WORKDIR /android/ +COPY android* /android/ + +ENV ANDROID_ARCH=arm +ENV PATH=$PATH:/android/ndk-$ANDROID_ARCH/bin:/android/sdk/tools:/android/sdk/platform-tools + +RUN sh /android/android-install-ndk.sh $ANDROID_ARCH +RUN sh /android/android-install-sdk.sh $ANDROID_ARCH +RUN mv /root/.android /tmp +RUN chmod 777 -R /tmp/.android +RUN chmod 755 /android/sdk/tools/* /android/sdk/tools/qemu/linux-x86_64/* + +ENV PATH=$PATH:/rust/bin \ + CARGO_TARGET_ARM_LINUX_ANDROIDEABI_LINKER=arm-linux-androideabi-gcc \ + HOME=/tmp diff -Nru cargo-0.17.0/vendor/libc-0.2.21/ci/docker/arm-unknown-linux-gnueabihf/Dockerfile cargo-0.19.0/vendor/libc-0.2.21/ci/docker/arm-unknown-linux-gnueabihf/Dockerfile --- cargo-0.17.0/vendor/libc-0.2.21/ci/docker/arm-unknown-linux-gnueabihf/Dockerfile 1970-01-01 00:00:00.000000000 +0000 +++ 
cargo-0.19.0/vendor/libc-0.2.21/ci/docker/arm-unknown-linux-gnueabihf/Dockerfile 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,7 @@ +FROM ubuntu:16.10 +RUN apt-get update +RUN apt-get install -y --no-install-recommends \ + gcc libc6-dev ca-certificates \ + gcc-arm-linux-gnueabihf libc6-dev-armhf-cross qemu-user +ENV CARGO_TARGET_ARM_UNKNOWN_LINUX_GNUEABIHF_LINKER=arm-linux-gnueabihf-gcc \ + PATH=$PATH:/rust/bin diff -Nru cargo-0.17.0/vendor/libc-0.2.21/ci/docker/i686-linux-android/Dockerfile cargo-0.19.0/vendor/libc-0.2.21/ci/docker/i686-linux-android/Dockerfile --- cargo-0.17.0/vendor/libc-0.2.21/ci/docker/i686-linux-android/Dockerfile 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/libc-0.2.21/ci/docker/i686-linux-android/Dockerfile 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,32 @@ +FROM ubuntu:16.04 + +RUN dpkg --add-architecture i386 && \ + apt-get update && \ + apt-get install -y --no-install-recommends \ + file \ + curl \ + ca-certificates \ + python \ + unzip \ + expect \ + openjdk-9-jre \ + libstdc++6:i386 \ + libpulse0 \ + gcc \ + libc6-dev + +WORKDIR /android/ +COPY android* /android/ + +ENV ANDROID_ARCH=i686 +ENV PATH=$PATH:/android/ndk-$ANDROID_ARCH/bin:/android/sdk/tools:/android/sdk/platform-tools + +RUN sh /android/android-install-ndk.sh $ANDROID_ARCH +RUN sh /android/android-install-sdk.sh $ANDROID_ARCH +RUN mv /root/.android /tmp +RUN chmod 777 -R /tmp/.android +RUN chmod 755 /android/sdk/tools/* /android/sdk/tools/qemu/linux-x86_64/* + +ENV PATH=$PATH:/rust/bin \ + CARGO_TARGET_I686_LINUX_ANDROID_LINKER=i686-linux-android-gcc \ + HOME=/tmp diff -Nru cargo-0.17.0/vendor/libc-0.2.21/ci/docker/i686-unknown-linux-gnu/Dockerfile cargo-0.19.0/vendor/libc-0.2.21/ci/docker/i686-unknown-linux-gnu/Dockerfile --- cargo-0.17.0/vendor/libc-0.2.21/ci/docker/i686-unknown-linux-gnu/Dockerfile 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/libc-0.2.21/ci/docker/i686-unknown-linux-gnu/Dockerfile 2017-08-16 09:07:18.000000000 +0000 @@ 
-0,0 +1,5 @@ +FROM ubuntu:16.10 +RUN apt-get update +RUN apt-get install -y --no-install-recommends \ + gcc-multilib libc6-dev ca-certificates +ENV PATH=$PATH:/rust/bin diff -Nru cargo-0.17.0/vendor/libc-0.2.21/ci/docker/i686-unknown-linux-musl/Dockerfile cargo-0.19.0/vendor/libc-0.2.21/ci/docker/i686-unknown-linux-musl/Dockerfile --- cargo-0.17.0/vendor/libc-0.2.21/ci/docker/i686-unknown-linux-musl/Dockerfile 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/libc-0.2.21/ci/docker/i686-unknown-linux-musl/Dockerfile 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,23 @@ +FROM ubuntu:16.10 + +RUN dpkg --add-architecture i386 +RUN apt-get update +RUN apt-get install -y --no-install-recommends \ + gcc-multilib make libc6-dev git curl ca-certificates libc6:i386 +# Below we're cross-compiling musl for i686 using the system compiler on an +# x86_64 system. This is an awkward thing to be doing and so we have to jump +# through a couple hoops to get musl to be happy. In particular: +# +# * We specifically pass -m32 in CFLAGS and override CC when running ./configure, +# since otherwise the script will fail to find a compiler. +# * We manually unset CROSS_COMPILE when running make; otherwise the makefile +# will call the non-existent binary 'i686-ar'. +RUN curl https://www.musl-libc.org/releases/musl-1.1.15.tar.gz | \ + tar xzf - && \ + cd musl-1.1.15 && \ + CC=gcc CFLAGS=-m32 ./configure --prefix=/musl-i686 --disable-shared --target=i686 && \ + make CROSS_COMPILE= install -j4 && \ + cd .. 
&& \ + rm -rf musl-1.1.15 +ENV PATH=$PATH:/musl-i686/bin:/rust/bin \ + CC_i686_unknown_linux_musl=musl-gcc diff -Nru cargo-0.17.0/vendor/libc-0.2.21/ci/docker/mips64-unknown-linux-gnuabi64/Dockerfile cargo-0.19.0/vendor/libc-0.2.21/ci/docker/mips64-unknown-linux-gnuabi64/Dockerfile --- cargo-0.17.0/vendor/libc-0.2.21/ci/docker/mips64-unknown-linux-gnuabi64/Dockerfile 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/libc-0.2.21/ci/docker/mips64-unknown-linux-gnuabi64/Dockerfile 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,11 @@ +FROM ubuntu:16.10 + +RUN apt-get update +RUN apt-get install -y --no-install-recommends \ + gcc libc6-dev qemu-user ca-certificates \ + gcc-mips64-linux-gnuabi64 libc6-dev-mips64-cross \ + qemu-system-mips64 + +ENV CARGO_TARGET_MIPS64_UNKNOWN_LINUX_GNUABI64_LINKER=mips64-linux-gnuabi64-gcc \ + CC_mips64_unknown_linux_gnuabi64=mips64-linux-gnuabi64-gcc \ + PATH=$PATH:/rust/bin diff -Nru cargo-0.17.0/vendor/libc-0.2.21/ci/docker/mipsel-unknown-linux-musl/Dockerfile cargo-0.19.0/vendor/libc-0.2.21/ci/docker/mipsel-unknown-linux-musl/Dockerfile --- cargo-0.17.0/vendor/libc-0.2.21/ci/docker/mipsel-unknown-linux-musl/Dockerfile 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/libc-0.2.21/ci/docker/mipsel-unknown-linux-musl/Dockerfile 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,17 @@ +FROM ubuntu:16.10 + +RUN apt-get update +RUN apt-get install -y --no-install-recommends \ + gcc libc6-dev qemu-user ca-certificates qemu-system-mips curl \ + bzip2 + +RUN mkdir /toolchain + +# Note that this originally came from: +# https://downloads.openwrt.org/snapshots/trunk/malta/generic/OpenWrt-Toolchain-malta-le_gcc-5.3.0_musl-1.1.15.Linux-x86_64.tar.bz2 +RUN curl -L https://s3.amazonaws.com/rust-lang-ci/libc/OpenWrt-Toolchain-malta-le_gcc-5.3.0_musl-1.1.15.Linux-x86_64.tar.bz2 | \ + tar xjf - -C /toolchain --strip-components=2 + +ENV PATH=$PATH:/rust/bin:/toolchain/bin \ + CC_mipsel_unknown_linux_musl=mipsel-openwrt-linux-gcc \ + 
CARGO_TARGET_MIPSEL_UNKNOWN_LINUX_MUSL_LINKER=mipsel-openwrt-linux-gcc diff -Nru cargo-0.17.0/vendor/libc-0.2.21/ci/docker/mips-unknown-linux-gnu/Dockerfile cargo-0.19.0/vendor/libc-0.2.21/ci/docker/mips-unknown-linux-gnu/Dockerfile --- cargo-0.17.0/vendor/libc-0.2.21/ci/docker/mips-unknown-linux-gnu/Dockerfile 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/libc-0.2.21/ci/docker/mips-unknown-linux-gnu/Dockerfile 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,10 @@ +FROM ubuntu:16.10 + +RUN apt-get update +RUN apt-get install -y --no-install-recommends \ + gcc libc6-dev qemu-user ca-certificates \ + gcc-mips-linux-gnu libc6-dev-mips-cross \ + qemu-system-mips + +ENV CARGO_TARGET_MIPS_UNKNOWN_LINUX_GNU_LINKER=mips-linux-gnu-gcc \ + PATH=$PATH:/rust/bin diff -Nru cargo-0.17.0/vendor/libc-0.2.21/ci/docker/mips-unknown-linux-musl/Dockerfile cargo-0.19.0/vendor/libc-0.2.21/ci/docker/mips-unknown-linux-musl/Dockerfile --- cargo-0.17.0/vendor/libc-0.2.21/ci/docker/mips-unknown-linux-musl/Dockerfile 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/libc-0.2.21/ci/docker/mips-unknown-linux-musl/Dockerfile 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,17 @@ +FROM ubuntu:16.10 + +RUN apt-get update +RUN apt-get install -y --no-install-recommends \ + gcc libc6-dev qemu-user ca-certificates qemu-system-mips curl \ + bzip2 + +RUN mkdir /toolchain + +# Note that this originally came from: +# https://downloads.openwrt.org/snapshots/trunk/ar71xx/generic/OpenWrt-SDK-ar71xx-generic_gcc-5.3.0_musl-1.1.15.Linux-x86_64.tar.bz2 +RUN curl -L https://s3.amazonaws.com/rust-lang-ci/libc/OpenWrt-SDK-ar71xx-generic_gcc-5.3.0_musl-1.1.15.Linux-x86_64.tar.bz2 | \ + tar xjf - -C /toolchain --strip-components=1 + +ENV PATH=$PATH:/rust/bin:/toolchain/staging_dir/toolchain-mips_34kc_gcc-5.3.0_musl-1.1.15/bin \ + CC_mips_unknown_linux_musl=mips-openwrt-linux-gcc \ + CARGO_TARGET_MIPS_UNKNOWN_LINUX_MUSL_LINKER=mips-openwrt-linux-gcc diff -Nru 
cargo-0.17.0/vendor/libc-0.2.21/ci/docker/powerpc64-unknown-linux-gnu/Dockerfile cargo-0.19.0/vendor/libc-0.2.21/ci/docker/powerpc64-unknown-linux-gnu/Dockerfile --- cargo-0.17.0/vendor/libc-0.2.21/ci/docker/powerpc64-unknown-linux-gnu/Dockerfile 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/libc-0.2.21/ci/docker/powerpc64-unknown-linux-gnu/Dockerfile 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,11 @@ +FROM ubuntu:16.10 + +RUN apt-get update +RUN apt-get install -y --no-install-recommends \ + gcc libc6-dev qemu-user ca-certificates \ + gcc-powerpc64-linux-gnu libc6-dev-ppc64-cross \ + qemu-system-ppc + +ENV CARGO_TARGET_POWERPC64_UNKNOWN_LINUX_GNU_LINKER=powerpc64-linux-gnu-gcc \ + CC=powerpc64-linux-gnu-gcc \ + PATH=$PATH:/rust/bin diff -Nru cargo-0.17.0/vendor/libc-0.2.21/ci/docker/powerpc-unknown-linux-gnu/Dockerfile cargo-0.19.0/vendor/libc-0.2.21/ci/docker/powerpc-unknown-linux-gnu/Dockerfile --- cargo-0.17.0/vendor/libc-0.2.21/ci/docker/powerpc-unknown-linux-gnu/Dockerfile 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/libc-0.2.21/ci/docker/powerpc-unknown-linux-gnu/Dockerfile 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,10 @@ +FROM ubuntu:16.10 + +RUN apt-get update +RUN apt-get install -y --no-install-recommends \ + gcc libc6-dev qemu-user ca-certificates \ + gcc-powerpc-linux-gnu libc6-dev-powerpc-cross \ + qemu-system-ppc + +ENV CARGO_TARGET_POWERPC_UNKNOWN_LINUX_GNU_LINKER=powerpc-linux-gnu-gcc \ + PATH=$PATH:/rust/bin diff -Nru cargo-0.17.0/vendor/libc-0.2.21/ci/docker/x86_64-rumprun-netbsd/Dockerfile cargo-0.19.0/vendor/libc-0.2.21/ci/docker/x86_64-rumprun-netbsd/Dockerfile --- cargo-0.17.0/vendor/libc-0.2.21/ci/docker/x86_64-rumprun-netbsd/Dockerfile 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/libc-0.2.21/ci/docker/x86_64-rumprun-netbsd/Dockerfile 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,6 @@ +FROM mato/rumprun-toolchain-hw-x86_64 +USER root +RUN apt-get update +RUN apt-get install -y 
--no-install-recommends \ + qemu +ENV PATH=$PATH:/rust/bin diff -Nru cargo-0.17.0/vendor/libc-0.2.21/ci/docker/x86_64-unknown-freebsd/Dockerfile cargo-0.19.0/vendor/libc-0.2.21/ci/docker/x86_64-unknown-freebsd/Dockerfile --- cargo-0.17.0/vendor/libc-0.2.21/ci/docker/x86_64-unknown-freebsd/Dockerfile 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/libc-0.2.21/ci/docker/x86_64-unknown-freebsd/Dockerfile 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,13 @@ +FROM alexcrichton/rust-slave-linux-cross:2016-04-15 +USER root + +RUN apt-get update +RUN apt-get install -y --no-install-recommends \ + qemu genext2fs + +ENTRYPOINT ["sh"] + +ENV PATH=$PATH:/rust/bin \ + QEMU=2016-11-06/freebsd.qcow2.gz \ + CAN_CROSS=1 \ + CARGO_TARGET_X86_64_UNKNOWN_FREEBSD_LINKER=x86_64-unknown-freebsd10-gcc diff -Nru cargo-0.17.0/vendor/libc-0.2.21/ci/docker/x86_64-unknown-linux-gnu/Dockerfile cargo-0.19.0/vendor/libc-0.2.21/ci/docker/x86_64-unknown-linux-gnu/Dockerfile --- cargo-0.17.0/vendor/libc-0.2.21/ci/docker/x86_64-unknown-linux-gnu/Dockerfile 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/libc-0.2.21/ci/docker/x86_64-unknown-linux-gnu/Dockerfile 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,5 @@ +FROM ubuntu:16.10 +RUN apt-get update +RUN apt-get install -y --no-install-recommends \ + gcc libc6-dev ca-certificates +ENV PATH=$PATH:/rust/bin diff -Nru cargo-0.17.0/vendor/libc-0.2.21/ci/docker/x86_64-unknown-linux-musl/Dockerfile cargo-0.19.0/vendor/libc-0.2.21/ci/docker/x86_64-unknown-linux-musl/Dockerfile --- cargo-0.17.0/vendor/libc-0.2.21/ci/docker/x86_64-unknown-linux-musl/Dockerfile 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/libc-0.2.21/ci/docker/x86_64-unknown-linux-musl/Dockerfile 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,13 @@ +FROM ubuntu:16.10 + +RUN apt-get update +RUN apt-get install -y --no-install-recommends \ + gcc make libc6-dev git curl ca-certificates +RUN curl https://www.musl-libc.org/releases/musl-1.1.15.tar.gz | \ + tar 
xzf - && \ + cd musl-1.1.15 && \ + ./configure --prefix=/musl-x86_64 && \ + make install -j4 && \ + cd .. && \ + rm -rf musl-1.1.15 +ENV PATH=$PATH:/musl-x86_64/bin:/rust/bin diff -Nru cargo-0.17.0/vendor/libc-0.2.21/ci/docker/x86_64-unknown-openbsd/Dockerfile cargo-0.19.0/vendor/libc-0.2.21/ci/docker/x86_64-unknown-openbsd/Dockerfile --- cargo-0.17.0/vendor/libc-0.2.21/ci/docker/x86_64-unknown-openbsd/Dockerfile 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/libc-0.2.21/ci/docker/x86_64-unknown-openbsd/Dockerfile 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,8 @@ +FROM ubuntu:16.10 + +RUN apt-get update +RUN apt-get install -y --no-install-recommends \ + gcc libc6-dev qemu curl ca-certificates \ + genext2fs +ENV PATH=$PATH:/rust/bin \ + QEMU=2016-11-06/openbsd-6.0-without-pkgs.qcow2 diff -Nru cargo-0.17.0/vendor/libc-0.2.21/ci/dox.sh cargo-0.19.0/vendor/libc-0.2.21/ci/dox.sh --- cargo-0.17.0/vendor/libc-0.2.21/ci/dox.sh 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/libc-0.2.21/ci/dox.sh 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,33 @@ +#!/bin/sh + +# Builds documentation for all target triples that we have a registered URL for +# in liblibc. This scrapes the list of triples to document from `src/lib.rs` +# which has a bunch of `html_root_url` directives we pick up. + +set -e + +TARGETS=`grep html_root_url src/lib.rs | sed 's/.*".*\/\(.*\)"/\1/'` + +rm -rf target/doc +mkdir -p target/doc + +cp ci/landing-page-head.html target/doc/index.html + +for target in $TARGETS; do + echo documenting $target + + rustdoc -o target/doc/$target --target $target src/lib.rs --cfg dox \ + --crate-name libc + + echo "
    • $target
    • " \ + >> target/doc/index.html +done + +cat ci/landing-page-footer.html >> target/doc/index.html + +# If we're on travis, not a PR, and on the right branch, publish! +if [ "$TRAVIS_PULL_REQUEST" = "false" ] && [ "$TRAVIS_BRANCH" = "master" ]; then + pip install ghp_import --install-option="--prefix=$HOME/.local" + $HOME/.local/bin/ghp-import -n target/doc + git push -qf https://${GH_TOKEN}@github.com/${TRAVIS_REPO_SLUG}.git gh-pages +fi diff -Nru cargo-0.17.0/vendor/libc-0.2.21/ci/ios/deploy_and_run_on_ios_simulator.rs cargo-0.19.0/vendor/libc-0.2.21/ci/ios/deploy_and_run_on_ios_simulator.rs --- cargo-0.17.0/vendor/libc-0.2.21/ci/ios/deploy_and_run_on_ios_simulator.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/libc-0.2.21/ci/ios/deploy_and_run_on_ios_simulator.rs 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,171 @@ +// Copyright 2017 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +// This is a script to deploy and execute a binary on an iOS simulator. +// The primary use of this is to be able to run unit tests on the simulator and +// retrieve the results. +// +// To do this through Cargo instead, use Dinghy +// (https://github.com/snipsco/dinghy): cargo dinghy install, then cargo dinghy +// test. + +use std::env; +use std::fs::{self, File}; +use std::io::Write; +use std::path::Path; +use std::process; +use std::process::Command; + +macro_rules! 
t { + ($e:expr) => (match $e { + Ok(e) => e, + Err(e) => panic!("{} failed with: {}", stringify!($e), e), + }) +} + +// Step one: Wrap as an app +fn package_as_simulator_app(crate_name: &str, test_binary_path: &Path) { + println!("Packaging simulator app"); + drop(fs::remove_dir_all("ios_simulator_app")); + t!(fs::create_dir("ios_simulator_app")); + t!(fs::copy(test_binary_path, + Path::new("ios_simulator_app").join(crate_name))); + + let mut f = t!(File::create("ios_simulator_app/Info.plist")); + t!(f.write_all(format!(r#" + + + + + CFBundleExecutable + {} + CFBundleIdentifier + com.rust.unittests + + + "#, crate_name).as_bytes())); +} + +// Step two: Start the iOS simulator +fn start_simulator() { + println!("Looking for iOS simulator"); + let output = t!(Command::new("xcrun").arg("simctl").arg("list").output()); + assert!(output.status.success()); + let mut simulator_exists = false; + let mut simulator_booted = false; + let mut found_rust_sim = false; + let stdout = t!(String::from_utf8(output.stdout)); + for line in stdout.lines() { + if line.contains("rust_ios") { + if found_rust_sim { + panic!("Duplicate rust_ios simulators found. Please \ + double-check xcrun simctl list."); + } + simulator_exists = true; + simulator_booted = line.contains("(Booted)"); + found_rust_sim = true; + } + } + + if simulator_exists == false { + println!("Creating iOS simulator"); + Command::new("xcrun") + .arg("simctl") + .arg("create") + .arg("rust_ios") + .arg("com.apple.CoreSimulator.SimDeviceType.iPhone-SE") + .arg("com.apple.CoreSimulator.SimRuntime.iOS-10-2") + .check_status(); + } else if simulator_booted == true { + println!("Shutting down already-booted simulator"); + Command::new("xcrun") + .arg("simctl") + .arg("shutdown") + .arg("rust_ios") + .check_status(); + } + + println!("Starting iOS simulator"); + // We can't uninstall the app (if present) as that will hang if the + // simulator isn't completely booted; just erase the simulator instead. 
+ Command::new("xcrun").arg("simctl").arg("erase").arg("rust_ios").check_status(); + Command::new("xcrun").arg("simctl").arg("boot").arg("rust_ios").check_status(); +} + +// Step three: Install the app +fn install_app_to_simulator() { + println!("Installing app to simulator"); + Command::new("xcrun") + .arg("simctl") + .arg("install") + .arg("booted") + .arg("ios_simulator_app/") + .check_status(); +} + +// Step four: Run the app +fn run_app_on_simulator() { + println!("Running app"); + let output = t!(Command::new("xcrun") + .arg("simctl") + .arg("launch") + .arg("--console") + .arg("booted") + .arg("com.rust.unittests") + .output()); + + println!("stdout --\n{}\n", String::from_utf8_lossy(&output.stdout)); + println!("stderr --\n{}\n", String::from_utf8_lossy(&output.stderr)); + + let stdout = String::from_utf8_lossy(&output.stdout); + let passed = stdout.lines() + .find(|l| l.contains("PASSED")) + .map(|l| l.contains("tests")) + .unwrap_or(false); + + println!("Shutting down simulator"); + Command::new("xcrun") + .arg("simctl") + .arg("shutdown") + .arg("rust_ios") + .check_status(); + if !passed { + panic!("tests didn't pass"); + } +} + +trait CheckStatus { + fn check_status(&mut self); +} + +impl CheckStatus for Command { + fn check_status(&mut self) { + println!("\trunning: {:?}", self); + assert!(t!(self.status()).success()); + } +} + +fn main() { + let args: Vec = env::args().collect(); + if args.len() != 2 { + println!("Usage: {} ", args[0]); + process::exit(-1); + } + + let test_binary_path = Path::new(&args[1]); + let crate_name = test_binary_path.file_name().unwrap(); + + package_as_simulator_app(crate_name.to_str().unwrap(), test_binary_path); + start_simulator(); + install_app_to_simulator(); + run_app_on_simulator(); +} diff -Nru cargo-0.17.0/vendor/libc-0.2.21/ci/landing-page-footer.html cargo-0.19.0/vendor/libc-0.2.21/ci/landing-page-footer.html --- cargo-0.17.0/vendor/libc-0.2.21/ci/landing-page-footer.html 1970-01-01 00:00:00.000000000 +0000 +++ 
cargo-0.19.0/vendor/libc-0.2.21/ci/landing-page-footer.html 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,3 @@ +
    + + diff -Nru cargo-0.17.0/vendor/libc-0.2.21/ci/landing-page-head.html cargo-0.19.0/vendor/libc-0.2.21/ci/landing-page-head.html --- cargo-0.17.0/vendor/libc-0.2.21/ci/landing-page-head.html 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.19.0/vendor/libc-0.2.21/ci/landing-page-head.html 2017-08-16 09:07:18.000000000 +0000 @@ -0,0 +1,7 @@ + + + + + + +